from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
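# --- Added note (illustrative, not part of the original file) ---
# The _LazyModule pattern above keeps `import transformers.models.yolos` cheap:
# heavy submodules such as modeling_yolos are only imported on first attribute
# access, and unavailable optional dependencies simply leave names unregistered.
#
#     from transformers.models.yolos import YolosConfig  # resolved lazily
#     config = YolosConfig()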
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if any."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Move `src` to `dst`, using a fast local move when the filesystem is local."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the fsspec event-loop references so fsspec can be reused in a new process."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
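# --- Added usage sketch (illustrative, not part of the original module;
# assumes an fsspec version where LocalFileSystem.protocol == "file") ---
if __name__ == "__main__":
    local_fs = fsspec.filesystem("file")
    assert not is_remote_filesystem(local_fs)
    # Protocol prefixes are stripped; plain local paths pass through unchanged.
    assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
    assert extract_path_from_uri("/tmp/dataset") == "/tmp/dataset"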
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law, V = I * R: pass the unknown quantity as 0 and the
    other two as known values, and the missing one is computed.

    >>> ohms_law(voltage=10, current=0, resistance=5)
    {'current': 2.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
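    # Added illustration: exactly one argument is 0 and gets solved for.
    print(ohms_law(voltage=0, current=2, resistance=4))   # {'voltage': 8.0}
    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}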
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place using cocktail shaker sort (bidirectional bubble sort)."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
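    # Added sanity checks (illustrative): worst case is O(n^2) comparisons,
    # but the `swapped` early exit finishes an already-sorted list in one pass.
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
    assert cocktail_shaker_sort([-4, 0, 5, -2]) == [-4, -2, 0, 5]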
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
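# Added illustration (minimal sketch, mirroring the round-trip used in
# test_dataset_infos_dict_dump_and_reload above): only the YAML-included
# fields survive the README.md round-trip.
def test_dataset_info_yaml_roundtrip_added_example():
    dataset_info = DatasetInfo(dataset_size=42)
    reloaded = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert reloaded.dataset_size == 42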
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Pipeline for image-to-image generation using Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
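# --- Added illustration (not part of the original pipeline module): the
# helper above rounds spatial sizes up to the next size the latent grid can
# represent, here with the default scale_factor of 8 (divisor 8**2 = 64). ---
if __name__ == "__main__":
    assert downscale_height_and_width(768, 768) == (96, 96)  # exact multiple of 64
    assert downscale_height_and_width(765, 765) == (96, 96)  # rounded up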
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
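# --- Added illustration (not part of the original test file): the retrieval
# expectations asserted above reduce to a max-inner-product search over the
# two dummy embeddings (the all-ones and all-twos vectors). ---
if __name__ == "__main__":
    doc_embeds = np.stack([np.ones(8), 2 * np.ones(8)])
    queries = np.stack([np.ones(8), -np.ones(8)])
    scores = queries @ doc_embeds.T
    assert scores.argmax(axis=1).tolist() == [1, 0]  # matches doc_ids [[1], [0]]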
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)


def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW is set to a truthy value"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    """
    A TestCase that keeps a single temporary directory for the whole class,
    optionally wipes its contents before each test, and removes it afterwards.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """A TestCase that resets the accelerator state singletons after every test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """A TestCase that starts the given mocks for the duration of each test."""

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    """Gather `tensor` across processes and check every copy matches the local one."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command: List[str], return_stdout=False):
    """
    Runs `command` with `subprocess.check_output`, optionally returning stdout,
    and raises SubprocessCallException with the captured output on failure.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
snake_case_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def lowerCamelCase__ ( snake_case_ : Tuple ) -> Tuple:
__snake_case = {}
with open(snake_case_ , '''r''' ) as file:
for line_number, line in enumerate(snake_case_ ):
__snake_case = line.strip()
if line:
__snake_case = line.split()
__snake_case = line_number
__snake_case = words[0]
__snake_case = value
return result
def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Optional[Any] ) -> List[Any]:
for attribute in key.split('''.''' ):
__snake_case = getattr(snake_case_ , snake_case_ )
__snake_case = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case_ ):
__snake_case = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case = getattr(snake_case_ , snake_case_ ).shape
elif weight_type is not None and weight_type == "param":
__snake_case = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__snake_case = getattr(snake_case_ , snake_case_ )
__snake_case = shape_pointer.shape
# let's reduce dimension
__snake_case = value[0]
else:
__snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case = value
elif weight_type == "weight_g":
__snake_case = value
elif weight_type == "weight_v":
__snake_case = value
elif weight_type == "bias":
__snake_case = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__snake_case = getattr(snake_case_ , snake_case_ )
__snake_case = value
else:
__snake_case = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase__ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : List[str] ) -> str:
__snake_case = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case_ ):
__snake_case = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case = '''.'''.join([key, hf_param_name] )
else:
__snake_case = key
__snake_case = value if '''lm_head''' in full_key else value[0]
snake_case_ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : List[str]=None , snake_case_ : Dict=None ) -> str:
__snake_case = False
for key, mapped_key in MAPPING.items():
__snake_case = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case = True
if "*" in mapped_key:
__snake_case = name.split(snake_case_ )[0].split('''.''' )[-2]
__snake_case = mapped_key.replace('''*''' , snake_case_ )
if "weight_g" in name:
__snake_case = '''weight_g'''
elif "weight_v" in name:
__snake_case = '''weight_v'''
elif "bias" in name:
__snake_case = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case = '''weight'''
else:
__snake_case = None
if hf_dict is not None:
rename_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
return is_used
return is_used
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> int:
__snake_case = []
__snake_case = fairseq_model.state_dict()
__snake_case = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case = True
else:
__snake_case = load_wavaveca_layer(snake_case_ , snake_case_ , snake_case_ )
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> List[Any]:
__snake_case = full_name.split('''conv_layers.''' )[-1]
__snake_case = name.split('''.''' )
__snake_case = int(items[0] )
__snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : int=None , snake_case_ : Any=None , snake_case_ : Any=True , snake_case_ : Any=False ) -> List[str]:
if config_path is not None:
__snake_case = WavaVecaConfig.from_pretrained(snake_case_ )
else:
__snake_case = WavaVecaConfig()
if is_seq_class:
__snake_case = read_txt_into_dict(snake_case_ )
__snake_case = idalabel
__snake_case = WavaVecaForSequenceClassification(snake_case_ )
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
feature_extractor.save_pretrained(snake_case_ )
elif is_finetuned:
if dict_path:
__snake_case = Dictionary.load(snake_case_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.eos_index
__snake_case = len(target_dict.symbols )
__snake_case = os.path.join(snake_case_ , '''vocab.json''' )
if not os.path.isdir(snake_case_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case_ ) )
return
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__snake_case = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case = 0
__snake_case = 1
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(snake_case_ , snake_case_ )
__snake_case = WavaVecaCTCTokenizer(
snake_case_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case_ , )
__snake_case = True if config.feat_extract_norm == '''layer''' else False
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
__snake_case = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
processor.save_pretrained(snake_case_ )
__snake_case = WavaVecaForCTC(snake_case_ )
else:
__snake_case = WavaVecaForPreTraining(snake_case_ )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case = argparse.Namespace(task='''audio_pretraining''' )
__snake_case = fairseq.tasks.setup_task(snake_case_ )
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case_ )
__snake_case = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ , not is_finetuned )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
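# Usage sketch for the CLI defined above; the script filename and both paths are
# hypothetical placeholders, and fairseq must be installed for the checkpoint to load:
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path /path/to/hf_wav2vec2 \
#       --not_finetuned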
| 24 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders( a=2 , b=3 , batch_size=16 , n_train_batches : int = 10 , n_valid_batches : int = 2 ):
    '''simple docstring'''

    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train( num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module ):
    '''simple docstring'''

    def __init__( self ) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def forward( self , x ):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
            # Save 11 states; with total_limit=2 only the last two survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
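    # Minimal sketch of the checkpointing API exercised above (the directory name is a
    # hypothetical placeholder); with automatic_checkpoint_naming, save_state() writes
    # to {project_dir}/checkpoints/checkpoint_{i}:
    #
    #   accelerator = Accelerator(
    #       project_dir='/tmp/my_run',
    #       project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
    #   )
    #   model, optimizer = accelerator.prepare(model, optimizer)
    #   accelerator.save_state()                                      # -> checkpoint_0
    #   accelerator.load_state('/tmp/my_run/checkpoints/checkpoint_0')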
| 29 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
        onnx_opsets = json.load(f )['opsets']

    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )

    with open(saved_model_path , 'rb' ) as f:
        saved_model.ParseFromString(f.read() )

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )

    # Convert to a sorted list for a stable report
    model_op_names = sorted(model_op_names )
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )

    if strict and len(incompatible_ops ) > 0:
        raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f'Found the following incompatible ops for the opset {opset}:' )
        print(*incompatible_ops , sep='\n' )
    else:
        print(f'The saved model {saved_model_path} can properly be converted with ONNX.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
    parser.add_argument(
        '--strict', action='store_true', help='Whether to make the check strict (raise errors) or lenient (print warnings)'
    )
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
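# Usage sketch (the saved-model path is a hypothetical placeholder):
#
#   python utils/check_tf_ops.py --saved_model_path /path/to/saved_model.pb --opset 12 --strict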
| 25 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
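# A sketch of the recommended replacement for the deprecated class above (the
# checkpoint id is illustrative):
#
#   from transformers import ImageGPTImageProcessor
#   processor = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')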
| 29 | 0 |
def alternative_string_arrange(first_str: str , second_str: str ) -> str:
    first_str_length: int = len(first_str )
    second_str_length: int = len(second_str )
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
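# Sanity check: characters are interleaved and the leftover tail of the longer
# string is appended once the shorter one runs out.
assert alternative_string_arrange('AB', 'XYZ') == 'AXBYZ'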
| 26 |
def is_palindrome(head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head ):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
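# Quick demo; the node class below is a minimal assumption, since the functions
# above only rely on `val` and `next` attributes:
class _Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


if __name__ == '__main__':
    palindrome = _Node(1, _Node(2, _Node(2, _Node(1))))
    print(is_palindrome_stack(palindrome))  # True
    print(is_palindrome_dict(palindrome))   # True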
| 29 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin ):
def __init__( self , __a ):
'''simple docstring'''
super().__init__()
__a : str = nn.ModuleList(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a = None , __a = None , __a = None , __a = None , __a = False , __a = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(__a , __a , self.nets ) ):
__a , __a : Any = controlnet(
__a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , )
# merge samples
if i == 0:
__a , __a : Optional[int] = down_samples, mid_sample
else:
__a : Dict = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__a , __a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __UpperCAmelCase ( self , __a , __a = True , __a = None , __a = False , __a = None , ):
'''simple docstring'''
__a : List[Any] = 0
__a : List[Any] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__a , is_main_process=__a , save_function=__a , safe_serialization=__a , variant=__a , )
idx += 1
__a : List[str] = model_path_to_save + f"""_{idx}"""
@classmethod
def __UpperCAmelCase ( cls , __a , **__a ):
'''simple docstring'''
__a : List[str] = 0
__a : str = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__a : Union[str, Any] = pretrained_model_path
while os.path.isdir(__a ):
__a : Any = ControlNetModel.from_pretrained(__a , **__a )
controlnets.append(__a )
idx += 1
__a : str = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(__a )} controlnets loaded from {pretrained_model_path}.""" )
if len(__a ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(__a )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(__a )
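# Minimal sketch of wrapping two ControlNets with the class above (the checkpoint
# ids are illustrative):
#
#   controlnets = [
#       ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny'),
#       ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-openpose'),
#   ]
#   multi_controlnet = MultiControlNetModel(controlnets)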
| 27 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
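# The lazy module above keeps `import transformers` cheap: config symbols resolve
# immediately, while model symbols trigger the torch-backed import on first access.
#
#   from transformers.models.vit_msn import ViTMSNConfig   # no torch needed
#   from transformers.models.vit_msn import ViTMSNModel    # imports the modeling code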
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
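# A sketch of how a pin table like this is typically consumed (the helper below is
# illustrative, not part of this file):
def deps_list(*pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]


# deps_list('torch', 'tokenizers')
# -> ['torch>=1.9,!=1.12.0', 'tokenizers>=0.11.1,!=0.11.3,<0.14']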
| 29 | 0 |
def longest_distance(graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )

    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x )

    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
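# For the adjacency list above, a longest chain is 0 -> 2 -> 5 -> 6 -> 7
# (five vertices), so the call prints 5.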
| 30 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput ):
    '''simple docstring'''

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin , ConfigMixin ):
    '''simple docstring'''

    _supports_gradient_checkpointing = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
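# Minimal usage sketch (shapes are illustrative; method names follow the public
# diffusers AutoencoderKL API that the definitions above correspond to):
#
#   vae = AutoencoderKL()
#   vae.enable_tiling()                      # route large inputs through the tiled paths
#   posterior = vae.encode(torch.randn(1, 3, 1024, 1024)).latent_dist
#   reconstruction = vae.decode(posterior.sample()).sample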
| 29 | 0 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    """simple docstring"""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
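# Trace for the innermost reduction of the equation above: "(4 * 2)" leaves 4 and 2
# on the operand stack and "*" on the operator stack; the ")" pops both operands and
# pushes mul(4, 2) = 8. The remaining reductions give 2 + 3 = 5, then 8 * 5 = 40,
# and finally 5 + 40 = 45.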
| 31 |
def binary_and(a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )

    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"

    max_len = max(len(a_binary ) , len(b_binary ) )

    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
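# Example: 25 = 0b011001 and 32 = 0b100000 share no set bits once zero-padded to a
# common width, so the conjunction is all zeros.
assert binary_and(25, 32) == '0b000000'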
| 29 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : Any=3_7 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_0 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Tuple=2 , ) -> List[str]:
a_ : List[Any] = parent
a_ : int = batch_size
a_ : Optional[int] = image_size
a_ : Union[str, Any] = patch_size
a_ : Optional[int] = num_channels
a_ : Tuple = is_training
a_ : Dict = use_labels
a_ : List[str] = hidden_size
a_ : Dict = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : int = intermediate_size
a_ : Optional[Any] = hidden_act
a_ : Tuple = hidden_dropout_prob
a_ : Optional[Any] = attention_probs_dropout_prob
a_ : Tuple = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : Dict = scope
a_ : Optional[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
a_ : Optional[int] = (image_size // patch_size) ** 2
a_ : Dict = num_patches + 2
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Optional[Any] = None
if self.use_labels:
a_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
a_ : Optional[int] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__ )
a_ : int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> List[str]:
a_ : str = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a_ : Optional[Any] = 1
a_ : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict:
a_ : Dict = self.type_sequence_label_size
a_ : Optional[int] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ : Any = 1
a_ : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
a_ : List[str] = self.prepare_config_and_inputs()
a_ , a_ , a_ : Tuple = config_and_inputs
a_ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDeiTModel,
            'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
a_ , a_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[str] = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
a_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Dense ) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Any = [*signature.parameters.keys()]
a_ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
a_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False ) -> List[str]:
a_ : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : int = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( ) -> str:
"""simple docstring"""
a_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
a_ : Dict = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
a_ : List[Any] = self.default_image_processor
a_ : str = prepare_img()
a_ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
# forward pass
a_ : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
a_ : Tuple = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
a_ : Tuple = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
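# Sketch of running this suite, assuming the usual transformers test layout; the
# integration test above only runs when the RUN_SLOW environment variable is set:
#
#   RUN_SLOW=1 python -m pytest tests/models/deit/test_modeling_tf_deit.py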
| 32 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token='[UNK]' , sep_token='[SEP]' , pad_token='[PAD]' , cls_token='[CLS]' , mask_token='[MASK]' , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
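# Minimal usage sketch (the checkpoint id is taken from the pretrained map above):
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained('YituTech/conv-bert-base')
#   tokenizer('convolutions meet transformers')['input_ids']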
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
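# A quick sanity check of the config above; from_dict/to_dict round-tripping is
# inherited from PretrainedConfig (minimal sketch, defaults assumed as defined):
if __name__ == "__main__":
    config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
    assert config.num_meta3d_blocks == 1  # library default
    restored = EfficientFormerConfig.from_dict(config.to_dict())
    assert restored.hidden_sizes == config.hidden_sizes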
| 29 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1E-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
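# The attention-shape assertions above encode SegFormer's spatial reduction: a
# block with patch stride s sees (image_size // s) ** 2 query tokens, and keys/
# values are pooled down by the block's sr_ratio. A standalone numeric check:
image_size, sr_ratios = 64, [8, 4, 2, 1]
expected_seq_len = (image_size // 4) ** 2                           # queries, first block (stride 4)
expected_reduced_seq_len = (image_size // (4 * sr_ratios[0])) ** 2  # keys/values after reduction
assert (expected_seq_len, expected_reduced_seq_len) == (256, 4)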
| 34 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n    >>> repo = "openai/shap-e-img2img"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n    >>> image = load_image(image_url).convert("RGB")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n    ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E image-to-image pipeline: rendered images."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer, ) -> None:
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer, )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance, ):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None, guidance_scale=4.0, frame_size=64, output_type="pil", return_dict=True, ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
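# The guidance branch in the loop above is standard classifier-free guidance:
# out = uncond + scale * (cond - uncond). A tiny tensor-level illustration:
import torch

cond, uncond, scale = torch.tensor([1.0]), torch.tensor([0.2]), 4.0
guided = uncond + scale * (cond - uncond)
assert torch.allclose(guided, torch.tensor([3.4]))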
| 29 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
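# The attribute_map above lets generic names resolve to GPT-2-style fields; a
# minimal sanity check against the defaults (behavior comes from PretrainedConfig):
if __name__ == "__main__":
    config = GPTBigCodeConfig()
    assert config.hidden_size == config.n_embd == 768
    assert config.num_hidden_layers == config.n_layer == 12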
| 35 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2, )
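# get_dummy_inputs branches on the device because torch.Generator objects have
# historically not been constructible on "mps"; a global manual_seed is the
# fallback there. The same branch, as a standalone helper:
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global-generator fallback for mps
    return torch.Generator(device=device).manual_seed(seed)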
| 29 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    # Wraps the CLIP tokenizer and torchvision transforms so gradients can flow
    # through the image-processing step (the stock CLIPProcessor converts to PIL
    # images, which breaks gradient flow).
    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """Instantiate a VQGAN-CLIP editor; pass a custom VQGAN model via `vqgan`."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the (detached) base latent and decode it."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
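# process_prompts above accepts "a | b" strings, "text:weight" pairs, and
# (text, weight) tuples. The ":"-weight convention, restated as a standalone,
# runnable helper for clarity:
def parse_prompt(prompt):
    if ":" in prompt:
        text, weight = prompt.split(":")
        return text, float(weight)
    return prompt, 1.0

assert parse_prompt("glasses:0.5") == ("glasses", 0.5)
assert parse_prompt("a smiling face") == ("a smiling face", 1.0)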
| 36 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
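# The KPI extraction in test_script filters the TrainingJobAnalytics dataframe
# by metric_name. The same filtering on a hand-built frame (pandas only, no AWS):
import pandas as pd

df = pd.DataFrame({"metric_name": ["eval_accuracy", "eval_loss"], "value": [0.82, 0.41]})
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
assert all(acc >= 0.7 for acc in eval_accuracy)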
| 37 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs, ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs, ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
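# Usage sketch for ORT_TO_NP_TYPE above: map the type string that onnxruntime
# reports for an output (e.g. session.get_outputs()[0].type) to a numpy dtype.
# The session handling is illustrative; the dict lookup itself is exact:
assert ORT_TO_NP_TYPE["tensor(float)"] is np.float32
assert ORT_TO_NP_TYPE["tensor(int64)"] is np.int64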
| 29 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
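# A numeric spot-check of the max/min semantics used above, independent of
# skfuzzy: fuzzy union is elementwise max, intersection elementwise min, and
# De Morgan's law holds under the 1-x complement.
import numpy as np

a, b = np.array([0.1, 0.5, 0.9]), np.array([0.4, 0.5, 0.2])
assert np.allclose(np.maximum(a, b), [0.4, 0.5, 0.9])  # union
assert np.allclose(np.minimum(a, b), [0.1, 0.5, 0.2])  # intersection
assert np.allclose(1 - np.maximum(a, b), np.minimum(1 - a, 1 - b))  # De Morgan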
| 38 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }),
            "id": datasets.Value("int64"),
        })
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        }, features=features, )
    return dataset
@pytest.fixture(scope='session' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\\n    Second line of data.'
@pytest.fixture(scope='session' )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope='session' )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
    return path
@pytest.fixture(scope='session' )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='session' )
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='session' )
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='session' )
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope='session' )
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
    return path
@pytest.fixture(scope='session' )
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        '\\n    <?xml version="1.0" encoding="UTF-8" ?>\n    <tmx version="1.4">\n    <header segtype="sentence" srclang="ca" />\n    <body>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n    </tu>\n    <tu>\n    <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n    <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n    </tu>\n    </body>\n    </tmx>')
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
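# The fixtures that follow materialize these records on disk as arrow, sqlite, csv, parquet, json and jsonl files, plus compressed and archived variants.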
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
    import contextlib
    import sqlite3
    UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__snake_case ) ) as con:
        cur = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
    import bz2
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__snake_case , 'wb' ) as f:
        writer = pq.ParquetWriter(__snake_case , schema=__snake_case )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=__snake_case )
        writer.write_table(pa_table )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 29 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
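# The tester below fabricates a small BertConfig-backed DPRConfig plus random inputs for the three DPR heads.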
class TFDPRModelTester :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , UpperCAmelCase=0 , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = projection_dim
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
_UpperCAmelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRContextEncoder(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRQuestionEncoder(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFDPRReader(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def UpperCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFDPRModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 39 |
from __future__ import annotations
def get_valid_pos( position : tuple[int, int] , n : int ):
    '''simple docstring'''
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
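# get_valid_pos above filters the knight's eight candidate moves down to those landing inside the n x n board.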
def is_complete( board : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board : list[list[int]] , pos : tuple[int, int] , curr : int ):
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n : int ):
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
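# Example: open_knight_tour(1) returns [[1]]; board sizes with no open tour (e.g. 4) raise ValueError.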
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name )-> FocalNetConfig:
    '''simple docstring'''
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
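# rename_key maps parameter names from the original FocalNet checkpoints onto the transformers naming scheme.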
def rename_key( name )-> str:
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        name = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False )-> None:
'''simple docstring'''
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: " , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
# rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
# verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors="pt" )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.id2label[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40 |
def partition( m : int ):
    '''simple docstring'''
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
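# memo[n][k] follows the classic partition recurrence (skip part size k, or use it once more); memo[m][m - 1] should equal the partition number p(m), e.g. p(5) = 7.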
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
        n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
        n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 0 |
'''simple docstring'''
B64_CHARSET = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data ) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
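# For reference, base64_encode(b'Hello World!') should return b'SGVsbG8gV29ybGQh', matching the standard library's base64.b64encode.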
def base64_decode(encoded_data ) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            """argument should be a bytes-like object or ASCII string, """
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )
    padding = encoded_data.count("""=""" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
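# Round-trip property: base64_decode(base64_encode(data)) == data for any bytes input.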
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCamelCase (Pipeline ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def __UpperCAmelCase ( self , top_k=None , padding=None , truncation=None , **_UpperCamelCase ) -> List[Any]:
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **_UpperCamelCase ) -> int:
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            inputs = image
        results = super().__call__(inputs , **_UpperCamelCase )
        return results
    def __UpperCAmelCase ( self , inputs , padding=False , truncation=False ) -> Optional[Any]:
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def __UpperCAmelCase ( self , model_inputs ) -> Optional[int]:
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def __UpperCAmelCase ( self , model_outputs , top_k=5 ) -> str:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
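# Hypothetical usage sketch (names assumed, not part of this file): vqa = pipeline('visual-question-answering'); vqa(image='cats.png', question='How many cats?', top_k=2).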
| 29 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
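# Issues carrying any of these labels are exempt from the auto-stale handling below.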
def main( ) -> None:
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 42 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
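# n * (n + 1) is always even, so 0.5 * n * (n + 1) is exact in floating point here and int() is safe.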
def solution( ):
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_directory , 'words.txt' )
    words = ''
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowercase = '''true'''
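# This script verifies that Accelerator.gather_for_metrics reproduces single-process metrics across batching modes.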
def get_basic_setup ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=82 , SCREAMING_SNAKE_CASE=16 ):
'''simple docstring'''
set_seed(42 )
__UpperCamelCase :List[str] = RegressionModel()
__UpperCamelCase :List[Any] = deepcopy(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
model.to(accelerator.device )
__UpperCamelCase , __UpperCamelCase :Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return model, ddp_model, dataloader
def get_dataloader ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__UpperCamelCase :Optional[int] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
with accelerator.main_process_first():
__UpperCamelCase :Any = dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__UpperCamelCase :Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=16 )
def get_mrpc_setup ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :str = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE , split_batches=SCREAMING_SNAKE_CASE )
__UpperCamelCase :int = get_dataloader(SCREAMING_SNAKE_CASE , not dispatch_batches )
__UpperCamelCase :List[str] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = []
for batch in dataloader:
__UpperCamelCase , __UpperCamelCase :Tuple = batch.values()
with torch.no_grad():
__UpperCamelCase :Any = model(SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :Any = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__UpperCamelCase , __UpperCamelCase :List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE )
targs.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :str = torch.cat(SCREAMING_SNAKE_CASE ), torch.cat(SCREAMING_SNAKE_CASE )
return logits, targs
def test_torch_metrics ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=82 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=16 ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = get_basic_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase :List[Any] = generate_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert (
len(SCREAMING_SNAKE_CASE ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE )}"""
def test_mrpc ( SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
__UpperCamelCase :Dict = evaluate.load('''glue''' , '''mrpc''' )
__UpperCamelCase , __UpperCamelCase :Dict = get_mrpc_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# First do baseline
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[int] = setup['''no''']
model.to(SCREAMING_SNAKE_CASE )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE )
with torch.inference_mode():
__UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE , references=batch['''labels'''] )
__UpperCamelCase :Tuple = metric.compute()
# Then do distributed
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE )
__UpperCamelCase :Union[str, Any] = outputs.logits.argmax(dim=-1 )
__UpperCamelCase :Optional[Any] = batch['''labels''']
__UpperCamelCase , __UpperCamelCase :Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main ( ):
'''simple docstring'''
__UpperCamelCase :str = Accelerator(split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__UpperCamelCase :Union[str, Any] = Accelerator(split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(SCREAMING_SNAKE_CASE , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__UpperCamelCase :Union[str, Any] = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE , 512 )
accelerator.state._reset_state()
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 43 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__UpperCAmelCase = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__UpperCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
if "://" in dataset_path:
UpperCAmelCase_ : int = dataset_path.split('://' )[1]
return dataset_path
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) )
else:
fs.mv(__snake_case , __snake_case , recursive=__snake_case )
def lowercase__ ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = threading.Lock()
| 29 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar ( num_diffusion_timesteps : int ,max_beta : float=0.9_99 ,alpha_transform_type : str="cosine" ,) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) ,max_beta ) )
    return torch.tensor(betas ,dtype=torch.float32 )
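# The scheduler below keeps an interpolated sigma table and alternates between a first-order step and a DPM-Solver-2 style correction (see state_in_first_order).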
class __A ( SchedulerMixin , ConfigMixin ):
_UpperCamelCase : Union[str, Any] = [e.name for e in KarrasDiffusionSchedulers]
_UpperCamelCase : Dict = 2
@register_to_config
def __init__( self , a__ = 1000 , a__ = 0.0_0_0_8_5 , a__ = 0.0_1_2 , a__ = "linear" , a__ = None , a__ = "epsilon" , a__ = "linspace" , a__ = 0 , ):
if trained_betas is not None:
            _lowerCAmelCase : str = torch.tensor(a__ , dtype=torch.float32 )
elif beta_schedule == "linear":
            _lowerCAmelCase : Optional[Any] = torch.linspace(a__ , a__ , a__ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase : int = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , a__ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase : Optional[Any] = betas_for_alpha_bar(a__ )
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" )
_lowerCAmelCase : List[str] = 1.0 - self.betas
_lowerCAmelCase : Optional[int] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a__ , a__ , a__ )
def __A ( self , a__ , a__=None ):
if schedule_timesteps is None:
_lowerCAmelCase : Dict = self.timesteps
_lowerCAmelCase : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowerCAmelCase : List[str] = 1 if len(a__ ) > 1 else 0
else:
_lowerCAmelCase : Optional[int] = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
_lowerCAmelCase : Optional[int] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __A ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __A ( self , a__ , a__ , ):
_lowerCAmelCase : Union[str, Any] = self.index_for_timestep(a__ )
if self.state_in_first_order:
_lowerCAmelCase : Optional[int] = self.sigmas[step_index]
else:
_lowerCAmelCase : Optional[Any] = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __A ( self , a__ , a__ = None , a__ = None , ):
_lowerCAmelCase : List[Any] = num_inference_steps
_lowerCAmelCase : Tuple = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowerCAmelCase : List[str] = np.linspace(0 , num_train_timesteps - 1 , a__ , dtype=a__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowerCAmelCase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase : List[str] = (np.arange(0 , a__ ) * step_ratio).round()[::-1].copy().astype(a__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowerCAmelCase : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase : str = (np.arange(a__ , 0 , -step_ratio )).round().copy().astype(a__ )
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
_lowerCAmelCase : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowerCAmelCase : List[str] = torch.from_numpy(np.log(a__ ) ).to(a__ )
_lowerCAmelCase : List[str] = np.interp(a__ , np.arange(0 , len(a__ ) ) , a__ )
        _lowerCAmelCase : Dict = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
_lowerCAmelCase : Any = torch.from_numpy(a__ ).to(device=a__ )
# interpolate sigmas
_lowerCAmelCase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_lowerCAmelCase : Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowerCAmelCase : Union[str, Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(a__ ).startswith("""mps""" ):
# mps does not support float64
            _lowerCAmelCase : List[Any] = torch.from_numpy(a__ ).to(a__ , dtype=torch.float32 )
else:
_lowerCAmelCase : List[str] = torch.from_numpy(a__ ).to(a__ )
# interpolate timesteps
_lowerCAmelCase : List[Any] = self.sigma_to_t(a__ ).to(a__ , dtype=timesteps.dtype )
_lowerCAmelCase : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_lowerCAmelCase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowerCAmelCase : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowerCAmelCase : Tuple = defaultdict(a__ )
def __A ( self , a__ ):
# get log sigma
_lowerCAmelCase : str = sigma.log()
# get distribution
_lowerCAmelCase : List[str] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowerCAmelCase : Optional[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowerCAmelCase : List[str] = low_idx + 1
_lowerCAmelCase : str = self.log_sigmas[low_idx]
_lowerCAmelCase : Union[str, Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_lowerCAmelCase : List[Any] = (low - log_sigma) / (low - high)
_lowerCAmelCase : List[str] = w.clamp(0 , 1 )
# transform interpolation to time range
_lowerCAmelCase : Optional[Any] = (1 - w) * low_idx + w * high_idx
_lowerCAmelCase : Optional[int] = t.view(sigma.shape )
return t
@property
def __A ( self ):
return self.sample is None
def __A ( self , a__ , a__ , a__ , a__ = True , ):
_lowerCAmelCase : List[str] = self.index_for_timestep(a__ )
# advance index counter by 1
_lowerCAmelCase : str = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowerCAmelCase : Tuple = self.sigmas[step_index]
_lowerCAmelCase : Optional[int] = self.sigmas_interpol[step_index + 1]
_lowerCAmelCase : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowerCAmelCase : int = self.sigmas[step_index - 1]
_lowerCAmelCase : Any = self.sigmas_interpol[step_index]
_lowerCAmelCase : List[str] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowerCAmelCase : int = 0
_lowerCAmelCase : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowerCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase : int = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowerCAmelCase : Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowerCAmelCase : Optional[Any] = sigma_interpol - sigma_hat
# store for 2nd order step
_lowerCAmelCase : Tuple = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowerCAmelCase : List[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowerCAmelCase : Union[str, Any] = sigma_next - sigma_hat
_lowerCAmelCase : int = self.sample
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a__ )
def __A ( self , a__ , a__ , a__ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowerCAmelCase : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a__ ):
# mps does not support float64
            _lowerCAmelCase : str = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            _lowerCAmelCase : List[Any] = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
_lowerCAmelCase : Any = self.timesteps.to(original_samples.device )
_lowerCAmelCase : int = timesteps.to(original_samples.device )
_lowerCAmelCase : str = [self.index_for_timestep(a__ , a__ ) for t in timesteps]
_lowerCAmelCase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowerCAmelCase : str = sigma.unsqueeze(-1 )
_lowerCAmelCase : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
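For intuition, the log-sigma interpolation method above can be read as plain NumPy; its body matches the `sigma_to_t` helper of diffusers' KDPM2 schedulers (an identification, not something stated in this file). A minimal sketch under that assumption; the function name and demo values are illustrative:
import numpy as np

def sigma_to_t(sigma: np.ndarray, log_sigmas: np.ndarray) -> np.ndarray:
    # fractional timestep index via linear interpolation in log-sigma space
    log_sigma = np.log(sigma)
    dists = log_sigma - log_sigmas[:, None]
    # last bracket index whose log sigma is still <= log_sigma, clamped so high_idx stays in range
    low_idx = np.clip((dists >= 0).cumsum(axis=0).argmax(axis=0), 0, log_sigmas.shape[0] - 2)
    high_idx = low_idx + 1
    low, high = log_sigmas[low_idx], log_sigmas[high_idx]
    w = np.clip((low - log_sigma) / (low - high), 0.0, 1.0)
    return ((1 - w) * low_idx + w * high_idx).reshape(np.shape(sigma))

t = sigma_to_t(np.array([0.5, 2.0]), np.log(np.linspace(0.1, 10.0, 1_000)))  # fractional indices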
| 44 |
def cocktail_shaker_sort(unsorted: list) -> list:
    '''
    Bidirectional bubble sort: sorts the list in place and returns it.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    '''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # backward pass: bubble the minimum toward the front
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):  # forward pass: bubble the maximum toward position i
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 0 |
"""simple docstring"""
def is_even(number: int) -> bool:
    """Return True when `number` is even, checked via the lowest bit."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
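As a quick sanity check, the rounding above behaves like this (a standalone sketch; `downscale_height_and_width` is the conventional name for what the body computes, and the default scale factor of 8 is assumed for the demo):
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple:
    # round each side up to a multiple of scale_factor**2, then divide by scale_factor
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor

assert downscale_height_and_width(768, 768) == (96, 96)   # 768 is already a multiple of 64
assert downscale_height_and_width(769, 768) == (104, 96)  # 769 rounds up to the next bucket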
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
    UpperCAmelCase_ : Any = arr.astype(np.float32 ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
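The `strength` handling in `get_timesteps` above is easy to verify in isolation; here is a minimal sketch with a plain list standing in for the scheduler's timestep tensor (names and values are illustrative):
def get_timesteps(timesteps, num_inference_steps: int, strength: float):
    # keep only the last `strength` fraction of the schedule, img2img-style
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

schedule = list(range(100, 0, -1))               # toy descending schedule
kept, steps = get_timesteps(schedule, 100, 0.3)  # strength=0.3 keeps the last 30 steps
assert steps == 30 and len(kept) == 30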
| 29 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = LEDTokenizer
_SCREAMING_SNAKE_CASE = LEDTokenizerFast
_SCREAMING_SNAKE_CASE = True
def _snake_case ( self ) -> Union[str, Any]:
super().setUp()
lowerCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
def _snake_case ( self , **lowercase ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self , **lowercase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def _snake_case ( self , lowercase ) -> Optional[Any]:
return "lower newer", "lower newer"
@cached_property
def _snake_case ( self ) -> Any:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def _snake_case ( self ) -> Optional[Any]:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(lowercase , max_length=len(lowercase ) , padding=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase , lowercase )
@require_torch
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , lowercase )
self.assertIn("""attention_mask""" , lowercase )
self.assertNotIn("""labels""" , lowercase )
self.assertNotIn("""decoder_attention_mask""" , lowercase )
@require_torch
def _snake_case ( self ) -> str:
lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def _snake_case ( self ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=lowercase , truncation=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def _snake_case ( self ) -> str:
lowerCAmelCase = ["""A long paragraph for summarization."""]
lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(lowercase , return_tensors="""pt""" )
lowerCAmelCase = tokenizer(text_target=lowercase , return_tensors="""pt""" )
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _snake_case ( self ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = ["""Summary of the text.""", """Another summary."""]
lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCAmelCase = tokenizer(lowercase , padding=lowercase )
lowerCAmelCase = [[0] * len(lowercase ) for x in encoded_output["""input_ids"""]]
lowerCAmelCase = tokenizer.pad(lowercase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , lowercase )
def _snake_case ( self ) -> Optional[Any]:
pass
def _snake_case ( self ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
lowerCAmelCase = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
lowerCAmelCase = """A, <mask> AllenNLP sentence."""
lowerCAmelCase = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
lowerCAmelCase = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 46 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = AcceleratorState()
UpperCAmelCase_ : str = tensor[None].clone().to(state.device )
UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu()
UpperCAmelCase_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __snake_case ):
return False
return True
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : str = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : Optional[Any] = stderr
async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
while True:
UpperCAmelCase_ : Dict = await stream.readline()
if line:
callback(__snake_case )
else:
break
async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : str = []
def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ):
UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip()
sink.append(__snake_case )
if not quiet:
print(__snake_case , __snake_case , file=__snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ),
] , timeout=__snake_case , )
return _RunOutput(await p.wait() , __snake_case , __snake_case )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ : str = asyncio.get_event_loop()
UpperCAmelCase_ : int = loop.run_until_complete(
_stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) )
UpperCAmelCase_ : int = ' '.join(__snake_case )
if result.returncode > 0:
UpperCAmelCase_ : int = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__snake_case , 'decode' ):
UpperCAmelCase_ : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( A__ ):
def __init__( self : List[str] , _a : Optional[Any]="" , _a : Any="train" ) -> int:
'''simple docstring'''
assert os.path.isdir(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =os.listdir(_a )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
_SCREAMING_SNAKE_CASE =os.path.join(_a , _a )
if not os.path.isfile(_a ):
continue
self.documents.append(_a )
def __len__( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.documents )
def __getitem__( self : Optional[int] , _a : Dict ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.documents[idx]
_SCREAMING_SNAKE_CASE =document_path.split('/' )[-1]
with open(_a , encoding='utf-8' ) as source:
_SCREAMING_SNAKE_CASE =source.read()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =process_story(_a )
return document_name, story_lines, summary_lines
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =list(filter(lambda _UpperCamelCase : len(_UpperCamelCase ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
_SCREAMING_SNAKE_CASE =[_add_missing_period(_UpperCamelCase ) for line in nonempty_lines]
# gather article lines
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =deque(_UpperCamelCase )
while True:
try:
_SCREAMING_SNAKE_CASE =lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_UpperCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
_SCREAMING_SNAKE_CASE =list(filter(lambda _UpperCamelCase : not t.startswith('@highlight' ) , _UpperCamelCase ) )
return story_lines, summary_lines
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
    _SCREAMING_SNAKE_CASE =['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def _lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
if len(_UpperCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_UpperCamelCase )) )
return sequence
def _lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =torch.ones_like(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =sequence == pad_token_id
_SCREAMING_SNAKE_CASE =0
return mask
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[tokenizer.encode(_UpperCamelCase ) for line in story_lines]
_SCREAMING_SNAKE_CASE =[token for sentence in story_lines_token_ids for token in sentence]
_SCREAMING_SNAKE_CASE =[tokenizer.encode(_UpperCamelCase ) for line in summary_lines]
_SCREAMING_SNAKE_CASE =[token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for sequence in batch:
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =[]
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_UpperCamelCase )
return torch.tensor(_UpperCamelCase )
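The last helper above builds alternating segment ids that flip at every separator token. A de-obfuscated sketch with a worked example (the separator id 0 is arbitrary):
import torch

def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for token in sequence:
            if token == separator_token_id:
                sentence_num += 1  # a new sentence starts at each separator
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)

assert compute_token_type_ids([[0, 5, 6, 0, 7, 0, 8]], 0).tolist() == [[0, 0, 0, 1, 1, 0, 0]]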
| 47 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ):
'''simple docstring'''
def get_dataset(__snake_case : Optional[Any] ):
UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Any = get_dataset(__snake_case )
UpperCAmelCase_ : str = get_dataset(__snake_case )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = []
for epoch in range(__snake_case ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch
UpperCAmelCase_ : List[Any] = model(__snake_case )
UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case )
accelerator.backward(__snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
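The essential round trip this file exercises, reduced to a sketch (the checkpoint directory and the explicit path argument to save_state assume a recent accelerate version):
import torch

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state('/tmp/ckpt')  # writes model, optimizer and RNG states
accelerator.load_state('/tmp/ckpt')  # restores them in place on the right device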
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Tuple = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
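The `_LazyModule` indirection used here (and again for the MRA and ViT MSN modules further down) defers heavy imports until an attribute is first touched. A stripped-down sketch of the idea, not the actual transformers implementation:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported attribute back to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        submodule = importlib.import_module('.' + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value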
| 48 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''Undirected, unweighted graph for running a Markov chain.'''
    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}
    def add_node(self, node: str) -> None:
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self) -> list[str]:
        return list(self.connections)
    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
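A quick demonstration of the chain above; the two-state probabilities are illustrative (each node's outgoing probabilities should sum to 1):
transitions = [('a', 'a', 0.9), ('a', 'b', 0.1), ('b', 'a', 0.5), ('b', 'b', 0.5)]
print(get_transitions('a', transitions, 1_000))  # the walk spends most of its steps in 'a'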
| 49 |
def is_palindrome(head):
    '''Check in O(1) extra space by reversing the second half of the list.'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    '''Check by pushing the second half onto a stack and comparing.'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    '''Check via a dict of value positions; at most one value may occur an odd number of times.'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
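A tiny harness for the three checks above; the minimal `ListNode` is an assumption, since this file does not define one:
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(values):
    # build a singly linked list from a python list
    head = None
    for value in reversed(values):
        head = ListNode(value, head)
    return head

assert is_palindrome(build([1, 2, 2, 1]))
assert is_palindrome_stack(build([1, 2, 3, 2, 1]))
assert not is_palindrome_dict(build([1, 2, 3]))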
| 29 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Dict = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
snake_case_ : Optional[int] = datasets.logging.get_logger(__name__)
snake_case_ : Tuple = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
snake_case_ : int = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
snake_case_ : str = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
snake_case_ : Tuple = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : str):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict):
"""simple docstring"""
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''')
UpperCAmelCase_ = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
UpperCAmelCase_ = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
UpperCAmelCase_ = self.config_name.upper()
else:
raise KeyError(
F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""")
# download the model checkpoint specified by self.config_name and set up the scorer
UpperCAmelCase_ = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
UpperCAmelCase_ = score.BleurtScorer(os.path.join(_snake_case , _snake_case))
def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.scorer.score(references=_snake_case , candidates=_snake_case)
return {"scores": scores}
| 51 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
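# A pin table like the one above is typically consumed by setup.py to build
# pinned requirement lists; a minimal self-contained sketch (the two-entry `deps`
# dict stands in for the full table, which is bound to an obfuscated name here):
deps = {"numpy": "numpy>=1.17", "tqdm": "tqdm>=4.27"}
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]
install_requires = deps_list("numpy", "tqdm")  # -> ['numpy>=1.17', 'tqdm>=4.27']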
| 29 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __snake_case ):
_UpperCAmelCase :List[Any] = (DDIMParallelScheduler,)
_UpperCAmelCase :Any = (('eta', 0.0), ('num_inference_steps', 5_0))
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**A_ )
return config
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = self.scheduler_classes[0]
UpperCamelCase : List[Any] = self.get_scheduler_config(**A_ )
UpperCamelCase : Dict = scheduler_class(**A_ )
UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0
UpperCamelCase : List[Any] = self.dummy_model()
UpperCamelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for t in scheduler.timesteps:
UpperCamelCase : Any = model(A_ , A_ )
UpperCamelCase : Any = scheduler.step(A_ , A_ , A_ , A_ ).prev_sample
return sample
def __UpperCamelCase( self ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A_ )
UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
UpperCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase : List[str] = scheduler_class(**A_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __UpperCamelCase( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , )
def __UpperCamelCase( self ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=A_ , num_inference_steps=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=A_ , eta=A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.scheduler_classes[0]
UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
UpperCamelCase : List[Any] = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.scheduler_classes[0]
UpperCamelCase : Any = self.get_scheduler_config()
UpperCamelCase : List[Any] = scheduler_class(**A_ )
UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0
scheduler.set_timesteps(A_ )
UpperCamelCase : Tuple = self.dummy_model()
UpperCamelCase : List[str] = self.dummy_sample_deter
UpperCamelCase : Optional[int] = self.dummy_sample_deter + 0.1
UpperCamelCase : Optional[int] = self.dummy_sample_deter - 0.1
UpperCamelCase : Optional[Any] = samplea.shape[0]
UpperCamelCase : Dict = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase : int = torch.arange(A_ )[0:3, None].repeat(1 , A_ )
UpperCamelCase : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase : Optional[int] = scheduler.batch_step_no_noise(A_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , A_ )
UpperCamelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Any = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.full_loop()
UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase : List[str] = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
        UpperCamelCase : int = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def __UpperCamelCase( self ):
'''simple docstring'''
        UpperCamelCase : int = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
UpperCamelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) )
UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
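# The constants asserted against _get_variance above follow the DDIM posterior
# variance (Song et al. 2021): sigma_t^2 = (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev).
# A sketch with the linear beta schedule from the config above (this mirrors, not
# reuses, the scheduler internals; values match the assertions up to rounding):
import torch
def ddim_variance(t, prev_t, num_train_timesteps=1000, beta_start=1e-4, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    abar_t = alphas_cumprod[t]
    abar_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev)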
| 52 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
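# blend_v/blend_h above hide tile seams by linearly cross-fading each tile into
# its top/left neighbour over `blend_extent` rows or columns. The vertical blend
# as a standalone sketch on plain (B, C, H, W) tensors:
import torch
def blend_rows(a, b, blend_extent):
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        w = y / blend_extent  # weight of `b` grows moving away from the seam
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - w) + b[:, :, y, :] * w
    return b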
| 29 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class snake_case :
"""simple docstring"""
def _lowerCamelCase ( self : Any , __A : Optional[Any] , __A : Any , __A : Union[str, Any] ):
return None
class snake_case :
"""simple docstring"""
def _lowerCamelCase ( self : Any , __A : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Optional[int] ):
return None
class snake_case ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =[
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _lowerCamelCase ( self : Optional[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__A , 'tf' , 1_2 , **__A )
@require_torch
@slow
def _lowerCamelCase ( self : Optional[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__A , 'pt' , 1_2 , **__A )
@require_torch
@slow
def _lowerCamelCase ( self : Any ):
from transformers import BertModel
__UpperCamelCase = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__A ) )
vocab_file.flush()
__UpperCamelCase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__UpperCamelCase = BertModel(BertConfig(vocab_size=len(__A ) ) )
model.save_pretrained(__A )
self._test_export(__A , 'pt' , 1_2 , __A )
@require_tf
@slow
def _lowerCamelCase ( self : List[str] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__UpperCamelCase = self._test_export(__A , 'tf' , 1_2 , **__A )
__UpperCamelCase = quantize(Path(__A ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__A ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _lowerCamelCase ( self : int ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__UpperCamelCase = self._test_export(__A , 'pt' , 1_2 , **__A )
__UpperCamelCase = quantize(__A )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__A ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _lowerCamelCase ( self : List[str] , __A : Union[str, Any] , __A : Any , __A : str , __A : Tuple=None , **__A : Tuple ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
__UpperCamelCase = Path(__A ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__A , __A , __A , __A , __A , **__A )
return path
except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def _lowerCamelCase ( self : Optional[Any] ):
from transformers import BertModel
__UpperCamelCase = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
__UpperCamelCase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__A , __A , 'pt' )
@require_tf
@require_tokenizers
@slow
def _lowerCamelCase ( self : Optional[Any] ):
from transformers import TFBertModel
__UpperCamelCase = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
__UpperCamelCase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__A , __A , 'tf' )
def _lowerCamelCase ( self : Tuple , __A : int , __A : Optional[int] , __A : int ):
__UpperCamelCase = FeatureExtractionPipeline(__A , __A )
__UpperCamelCase = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = infer_shapes(__A , __A )
# Assert all variables are present
self.assertEqual(len(__A ) , len(__A ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __A )
self.assertSequenceEqual(variable_names[3:] , __A )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = ['input_ids', 'attention_mask', 'token_type_ids']
__UpperCamelCase = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
__UpperCamelCase , __UpperCamelCase = ensure_valid_input(FuncContiguousArgs() , __A , __A )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__A ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__A ) , set(__A ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__A , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
__UpperCamelCase , __UpperCamelCase = ensure_valid_input(FuncNonContiguousArgs() , __A , __A )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__A ) , 1 )
self.assertEqual(len(__A ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
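# Hypothetical end-to-end use of the helpers exercised by these tests (model name
# and output path are placeholders; convert/quantize are the functions imported
# at the top of this file):
from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize
onnx_path = Path("onnx/bert-base-cased.onnx")
convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)
quantized_path = quantize(onnx_path)  # writes *-quantized.onnx next to the input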
| 53 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
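# For reference, the zfill/zip routine above agrees with the built-in bitwise AND
# once the result is zero-padded to the wider operand (a quick equivalence sketch):
def binary_and_builtin(a, b):
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    width = max(a.bit_length(), b.bit_length(), 1)
    return "0b" + format(a & b, f"0{width}b")
assert binary_and_builtin(25, 32) == "0b000000"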
| 29 | 0 |
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ )
return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
a__ : Any = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
a__ : Optional[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
a__ , a__ : Union[str, Any] = gray_img.shape
# set different points to rotate image
a__ : Optional[Any] = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
a__ : List[str] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
a__ : Optional[Any] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
a__ : Tuple = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
a__ : Union[str, Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
a__ : Any = plt.figure(1)
a__ : Optional[int] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 54 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
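# The two methods above produce the standard BERT layout for a sequence pair:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment (including
# both leading specials) and 1 over the second. Illustrative check using BERT's
# conventional special-token ids cls=101, sep=102 (an assumption, not read from
# any vocab file):
ids_a, ids_b = [7, 8], [9]
tokens = [101] + ids_a + [102] + ids_b + [102]
type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert tokens == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]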
| 29 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
# TODO Update this
a_ : List[str] = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "esm"
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=1026 , UpperCamelCase=0.02 , UpperCamelCase=1e-12 , UpperCamelCase="absolute" , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase , mask_token_id=UpperCamelCase , **UpperCamelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
lowerCamelCase_ = emb_layer_norm_before
lowerCamelCase_ = token_dropout
lowerCamelCase_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowerCamelCase_ = EsmFoldConfig()
elif isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = EsmFoldConfig(**UpperCamelCase )
lowerCamelCase_ = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowerCamelCase_ = get_default_vocab_list()
else:
lowerCamelCase_ = vocab_list
else:
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , UpperCamelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase ):
lowerCamelCase_ = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case :
"""simple docstring"""
_lowerCamelCase = None
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = 0
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = 1_28
_lowerCamelCase = None
def snake_case ( self ):
"""simple docstring"""
if self.trunk is None:
lowerCamelCase_ = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase ):
lowerCamelCase_ = TrunkConfig(**self.trunk )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = asdict(self )
lowerCamelCase_ = self.trunk.to_dict()
return output
@dataclass
class snake_case :
"""simple docstring"""
_lowerCamelCase = 48
_lowerCamelCase = 10_24
_lowerCamelCase = 1_28
_lowerCamelCase = 32
_lowerCamelCase = 32
_lowerCamelCase = 32
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = False
_lowerCamelCase = 4
_lowerCamelCase = 1_28
_lowerCamelCase = None
def snake_case ( self ):
"""simple docstring"""
if self.structure_module is None:
lowerCamelCase_ = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase ):
lowerCamelCase_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowerCamelCase_ = self.sequence_state_dim // self.sequence_head_width
lowerCamelCase_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = asdict(self )
lowerCamelCase_ = self.structure_module.to_dict()
return output
@dataclass
class snake_case :
"""simple docstring"""
_lowerCamelCase = 3_84
_lowerCamelCase = 1_28
_lowerCamelCase = 16
_lowerCamelCase = 1_28
_lowerCamelCase = 12
_lowerCamelCase = 4
_lowerCamelCase = 8
_lowerCamelCase = 0.1
_lowerCamelCase = 8
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 7
_lowerCamelCase = 10
_lowerCamelCase = 1e-8
_lowerCamelCase = 1e5
def snake_case ( self ):
"""simple docstring"""
return asdict(self )
def __snake_case ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 55 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_metaad_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
| 29 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : str = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class a ( _lowerCamelCase ):
snake_case_ = "funnel"
snake_case_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : List[Any] , lowercase_ : Optional[int]=3_0522 , lowercase_ : Optional[int]=[4, 4, 4] , lowercase_ : str=None , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=768 , lowercase_ : List[str]=12 , lowercase_ : List[str]=64 , lowercase_ : Optional[int]=3072 , lowercase_ : Optional[int]="gelu_new" , lowercase_ : List[Any]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]=None , lowercase_ : int=1e-9 , lowercase_ : Union[str, Any]="mean" , lowercase_ : Dict="relative_shift" , lowercase_ : Optional[int]=True , lowercase_ : Optional[int]=True , lowercase_ : Optional[Any]=True , **lowercase_ : Dict , ):
snake_case_ = vocab_size
snake_case_ = block_sizes
snake_case_ = [1] * len(lowercase_ ) if block_repeats is None else block_repeats
assert len(lowercase_ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
snake_case_ = num_decoder_layers
snake_case_ = d_model
snake_case_ = n_head
snake_case_ = d_head
snake_case_ = d_inner
snake_case_ = hidden_act
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = initializer_range
snake_case_ = initializer_std
snake_case_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
snake_case_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
snake_case_ = attention_type
snake_case_ = separate_cls
snake_case_ = truncate_seq
snake_case_ = pool_q_only
super().__init__(**lowercase_ )
@property
def A_ ( self : Optional[int] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def A_ ( self : Union[str, Any] , lowercase_ : Dict ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def A_ ( self : Optional[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def A_ ( self : Tuple , lowercase_ : Union[str, Any] ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 56 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
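# Illustrative sketch (not part of the pipeline above): the classifier-free
# guidance combination used in the denoising loop, shown on dummy tensors.
if __name__ == "__main__":
    import torch

    noise_pred_uncond = torch.zeros(1, 4)
    noise_pred_cond = torch.ones(1, 4)
    guidance_scale = 4.0
    guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
    print(guided)  # tensor([[4., 4., 4., 4.]])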
| 29 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
A : int = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__a , **__a ):
super().__init__(*__a , **__a )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case ( self , __a=None , __a=None , __a=None ):
__lowerCAmelCase = {}
__lowerCAmelCase = {}
if prompt is not None:
__lowerCAmelCase = prompt
if generate_kwargs is not None:
__lowerCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__lowerCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
__lowerCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , __a , **__a ):
return super().__call__(__a , **__a )
def snake_case ( self , __a , __a=None ):
__lowerCAmelCase = load_image(__a )
if prompt is not None:
if not isinstance(__a , __a ):
raise ValueError(
f"Received an invalid text input, got - {type(__a )} - but expected a single string. "
"Note also that one single text can be provided for conditional image to text generation." )
__lowerCAmelCase = self.model.config.model_type
if model_type == "git":
__lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
__lowerCAmelCase = self.tokenizer(text=__a , add_special_tokens=__a ).input_ids
__lowerCAmelCase = [self.tokenizer.cls_token_id] + input_ids
__lowerCAmelCase = torch.tensor(__a ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
__lowerCAmelCase = self.image_processor(images=__a , header_text=__a , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
__lowerCAmelCase = self.tokenizer(__a , return_tensors=self.framework )
model_inputs.update(__a )
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation" )
else:
__lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__lowerCAmelCase = None
return model_inputs
def snake_case ( self , __a , __a=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , __a )
and all(x is None for x in model_inputs["input_ids"] )
):
__lowerCAmelCase = None
if generate_kwargs is None:
__lowerCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__lowerCAmelCase = model_inputs.pop(self.model.main_input_name )
__lowerCAmelCase = self.model.generate(__a , **__a , **__a )
return model_outputs
def snake_case ( self , __a ):
__lowerCAmelCase = []
for output_ids in model_outputs:
__lowerCAmelCase = {
"generated_text": self.tokenizer.decode(
__a , skip_special_tokens=__a , )
}
records.append(__a )
return records
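# Usage sketch (illustrative): this class backs the "image-to-text" pipeline
# task in transformers; the checkpoint id and image path below are
# assumptions, not fixed by this file.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="microsoft/git-base")
    print(captioner("path/to/image.png"))  # placeholder image path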
| 57 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=0.999 , __lowerCamelCase : Dict="cosine" , ) ->Tuple:
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCamelCase : int ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCamelCase : List[str] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
_SCREAMING_SNAKE_CASE = []
for i in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
_SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCamelCase ) / alpha_bar_fn(__lowerCamelCase ) , __lowerCamelCase ) )
return torch.tensor(__lowerCamelCase , dtype=torch.floataa )
class a_ ( snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCamelCase = [e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase = 2
@register_to_config
def __init__( self , A = 1000 , A = 0.0_0085 , A = 0.012 , A = "linear" , A = None , A = "epsilon" , A = "linspace" , A = 0 , ) -> Dict:
if trained_betas is not None:
_SCREAMING_SNAKE_CASE = torch.tensor(A , dtype=torch.floataa )
elif beta_schedule == "linear":
_SCREAMING_SNAKE_CASE = torch.linspace(A , A , A , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_SCREAMING_SNAKE_CASE = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_SCREAMING_SNAKE_CASE = betas_for_alpha_bar(A )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
_SCREAMING_SNAKE_CASE = 1.0 - self.betas
_SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(A , A , A )
def snake_case_( self , A , A=None ) -> Union[str, Any]:
if schedule_timesteps is None:
_SCREAMING_SNAKE_CASE = self.timesteps
_SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_SCREAMING_SNAKE_CASE = 1 if len(A ) > 1 else 0
else:
_SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(A ) else timestep
_SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
return indices[pos].item()
@property
def snake_case_( self ) -> Tuple:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def snake_case_( self , A , A , ) -> torch.FloatTensor:
_SCREAMING_SNAKE_CASE = self.index_for_timestep(A )
if self.state_in_first_order:
_SCREAMING_SNAKE_CASE = self.sigmas[step_index]
else:
_SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
_SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
return sample
def snake_case_( self , A , A = None , A = None , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = num_inference_steps
_SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_SCREAMING_SNAKE_CASE = np.linspace(0 , num_train_timesteps - 1 , A , dtype=A )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE = (np.arange(0 , A ) * step_ratio).round()[::-1].copy().astype(A )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_SCREAMING_SNAKE_CASE = (np.arange(A , 0 , -step_ratio )).round().copy().astype(A )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
_SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(A ) ).to(A )
_SCREAMING_SNAKE_CASE = np.interp(A , np.arange(0 , len(A ) ) , A )
_SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_SCREAMING_SNAKE_CASE = torch.from_numpy(A ).to(device=A )
# interpolate sigmas
_SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_SCREAMING_SNAKE_CASE = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(A ).startswith("""mps""" ):
# mps does not support float64
_SCREAMING_SNAKE_CASE = torch.from_numpy(A ).to(A , dtype=torch.floataa )
else:
_SCREAMING_SNAKE_CASE = torch.from_numpy(A ).to(A )
# interpolate timesteps
_SCREAMING_SNAKE_CASE = self.sigma_to_t(A ).to(A , dtype=timesteps.dtype )
_SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps] )
_SCREAMING_SNAKE_CASE = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_SCREAMING_SNAKE_CASE = defaultdict(A )
def snake_case_( self , A ) -> Optional[Any]:
# get log sigma
_SCREAMING_SNAKE_CASE = sigma.log()
# get distribution
_SCREAMING_SNAKE_CASE = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_SCREAMING_SNAKE_CASE = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_SCREAMING_SNAKE_CASE = low_idx + 1
_SCREAMING_SNAKE_CASE = self.log_sigmas[low_idx]
_SCREAMING_SNAKE_CASE = self.log_sigmas[high_idx]
# interpolate sigmas
_SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high)
_SCREAMING_SNAKE_CASE = w.clamp(0 , 1 )
# transform interpolation to time range
_SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx
_SCREAMING_SNAKE_CASE = t.view(sigma.shape )
return t
@property
def snake_case_( self ) -> int:
return self.sample is None
def snake_case_( self , A , A , A , A = True , ) -> Union[SchedulerOutput, Tuple]:
_SCREAMING_SNAKE_CASE = self.index_for_timestep(A )
# advance index counter by 1
_SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(A ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_SCREAMING_SNAKE_CASE = self.sigmas[step_index]
_SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index + 1]
_SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1]
_SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index]
_SCREAMING_SNAKE_CASE = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
_SCREAMING_SNAKE_CASE = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol
_SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_SCREAMING_SNAKE_CASE = sigma_interpol - sigma_hat
# store for 2nd order step
_SCREAMING_SNAKE_CASE = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_SCREAMING_SNAKE_CASE = sigma_next - sigma_hat
_SCREAMING_SNAKE_CASE = self.sample
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A )
def snake_case_( self , A , A , A , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A ):
# mps does not support float64
_SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device )
_SCREAMING_SNAKE_CASE = [self.index_for_timestep(A , A ) for t in timesteps]
_SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1 )
_SCREAMING_SNAKE_CASE = original_samples + noise * sigma
return noisy_samples
    def __len__( self ) -> int:
return self.config.num_train_timesteps
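# Minimal usage sketch (hedged): the class above mirrors diffusers'
# KDPM2DiscreteScheduler, so the upstream name is used here; the zero
# "model output" is a stand-in for a real denoiser.
if __name__ == "__main__":
    import torch
    from diffusers import KDPM2DiscreteScheduler

    scheduler = KDPM2DiscreteScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(model_input)  # denoiser stand-in
        sample = scheduler.step(model_output, t, sample).prev_sample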
| 58 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class UpperCAmelCase ( A_ ):
A__ : List[Any] = "xlm-roberta"
def __init__(self : Dict , snake_case__ : List[Any]=3_05_22 , snake_case__ : Dict=7_68 , snake_case__ : Optional[Any]=12 , snake_case__ : Optional[int]=12 , snake_case__ : Optional[int]=30_72 , snake_case__ : Any="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_12 , snake_case__ : str=2 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : Dict=1e-12 , snake_case__ : Union[str, Any]=1 , snake_case__ : str=0 , snake_case__ : str=2 , snake_case__ : Union[str, Any]="absolute" , snake_case__ : str=True , snake_case__ : List[str]=None , **snake_case__ : Tuple , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
snake_case : int = vocab_size
snake_case : Union[str, Any] = hidden_size
snake_case : Any = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[Any] = hidden_act
snake_case : Any = intermediate_size
snake_case : List[Any] = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : Union[str, Any] = max_position_embeddings
snake_case : Dict = type_vocab_size
snake_case : Optional[Any] = initializer_range
snake_case : Any = layer_norm_eps
snake_case : str = position_embedding_type
snake_case : List[Any] = use_cache
snake_case : List[str] = classifier_dropout
class UpperCAmelCase ( A_ ):
@property
def _SCREAMING_SNAKE_CASE (self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
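# Usage sketch (assumes the upstream transformers classes that this file
# mirrors; the obfuscated names above shadow them):
if __name__ == "__main__":
    from transformers import XLMRobertaConfig

    config = XLMRobertaConfig()  # defaults match the signature above
    print(config.hidden_size)    # 768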
| 59 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
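# Usage sketch (hedged): the class above mirrors diffusers.OnnxRuntimeModel;
# "path/to/onnx_model" is a placeholder for a directory containing model.onnx.
if __name__ == "__main__":
    from diffusers import OnnxRuntimeModel

    model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model")
    # outputs = model(sample=...)  # kwargs are converted to numpy arrays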
| 29 | 0 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
return gabor
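# Spot check (illustrative): with psi = 0 and gamma = 0 the Gaussian
# envelope and the cosine carrier are both maximal at the kernel centre,
# so the centre value equals the kernel maximum.
_demo_kernel = gabor_filter_kernel(15, 8, 0, 10, 0, 0)
assert _demo_kernel[7, 7] == _demo_kernel.max()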
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
snake_case__ : List[str] = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
snake_case__ : Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
snake_case__ : Any = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
snake_case__ : Any = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
snake_case__ : Tuple = out / out.max() * 255
snake_case__ : Optional[Any] = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
| 60 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
import bza
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lza.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
import bza
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
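# Illustrative test (the fixture name `csv_path` is the upstream one this
# conftest mirrors, so it is an assumption here): pytest injects a fixture's
# return value into any test that names it as a parameter.
def test_csv_fixture_has_expected_header(csv_path):
    with open(csv_path) as f:
        assert f.readline().strip() == "col_1,col_2,col_3"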
| 29 | 0 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
return savings
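# Worked example (illustrative): "XXXXVIIII" is a valid but non-minimal
# numeral for 49; regenerating it gives the minimal "XLIX", a saving of
# five characters.
assert parse_roman_numerals("XXXXVIIII") == 49
assert generate_roman_numerals(49) == "XLIX"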
if __name__ == "__main__":
print(f"""{solution() = }""")
| 61 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    '''simple docstring'''
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
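# Trivial check (illustrative): a 1x1 board is an immediate open tour, so
# the solver returns [[1]] without making any knight moves.
assert open_knight_tour(1) == [[1]]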
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = "poolformer"
def __init__( self , A_=3 , A_=16 , A_=16 , A_=3 , A_=4.0 , A_=[2, 2, 6, 2] , A_=[64, 128, 320, 512] , A_=[7, 3, 3, 3] , A_=[4, 2, 2, 2] , A_=[2, 1, 1, 1] , A_=4 , A_=0.0 , A_="gelu" , A_=True , A_=1E-5 , A_=0.02 , **A_ , ) -> Optional[int]:
__UpperCamelCase =num_channels
__UpperCamelCase =patch_size
__UpperCamelCase =stride
__UpperCamelCase =padding
__UpperCamelCase =pool_size
__UpperCamelCase =hidden_sizes
__UpperCamelCase =mlp_ratio
__UpperCamelCase =depths
__UpperCamelCase =patch_sizes
__UpperCamelCase =strides
__UpperCamelCase =num_encoder_blocks
__UpperCamelCase =drop_path_rate
__UpperCamelCase =hidden_act
__UpperCamelCase =use_layer_scale
__UpperCamelCase =layer_scale_init_value
__UpperCamelCase =initializer_range
super().__init__(**A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = version.parse("1.11" )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _a ( self ) -> float:
return 2E-3
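# Usage sketch (assumes the upstream transformers.PoolFormerConfig that this
# file mirrors):
if __name__ == "__main__":
    from transformers import PoolFormerConfig

    config = PoolFormerConfig()
    print(config.hidden_sizes)  # [64, 128, 320, 512] by default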
| 62 |
def partition(m: int) -> int:
    '''simple docstring'''
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
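# Spot check (illustrative): for m = 5 the recurrence yields 7, matching
# the seven integer partitions of 5.
assert partition(5) == 7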
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 0 |
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
return square_of_sum - sum_of_squares
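# Worked example from the problem statement: for the first ten natural
# numbers the difference is 3025 - 385 = 2640.
assert solution(10) == 2640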
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
A_ = {
'''gpt-neox-20b''': 20_48,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for GPT-NeoX, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate conversation turns, each terminated by EOS, truncating from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
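

# Hedged usage sketch (added example): assumes Hub access; the checkpoint name
# matches the pretrained map above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tok("Hello world")["input_ids"]
print(ids, "->", tok.decode(ids))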
| 64 |
import os
# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Count how many words in ``words.txt`` have a triangular word value
    (Project Euler problem 42)."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
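

# Added worked example: the word value of "SKY" is 19 + 11 + 25 = 55, the 10th
# triangular number (10 * 11 / 2), so "SKY" counts as a triangle word.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS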
| 29 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax implementation of the DDPM (ancestral sampling) scheduler."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        """DDPM does not rescale model inputs; the sample is returned unchanged."""
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance beta_t (see formulas (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get the previous sample:
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """Predict the sample at the previous timestep by reversing the diffusion process."""
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                "or `v_prediction` for the FlaxDDPMScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample mu_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
    def __len__(self) -> int:
        return self.config.num_train_timesteps
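

# Hedged usage sketch (added example, not part of the original file): a minimal
# ancestral-sampling loop with the scheduler above. The "model output" here is a
# placeholder for a trained UNet's epsilon prediction; shapes are illustrative.
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)

    key = jax.random.PRNGKey(0)
    sample = jax.random.normal(key, (1, 4, 8, 8)) * state.init_noise_sigma
    for t in state.timesteps:
        model_output = -sample  # placeholder epsilon prediction
        key, step_key = jax.random.split(key)
        sample, state = scheduler.step(state, model_output, int(t), sample, key=step_key, return_dict=False)
    print(sample.shape)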
| 65 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    '''Convert a TensorFlow T5 checkpoint into a PyTorch model directory.'''
    config = TaConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__a = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
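

# Hedged usage sketch (added): an example shell invocation; the script name and
# all paths are illustrative placeholders.
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model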
| 66 |
def cocktail_shaker_sort(unsorted: list) -> list:
    '''Sort a list in place by alternating forward and backward bubble passes.'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
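
# Added check (illustrative): the result always matches Python's built-in sort.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == sorted([4, 5, 2, 1, 2])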
| 29 | 0 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    '''Return every index in ``s`` where ``pattern`` starts; O(len(s) * len(pattern)).'''
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
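
# Added example: the scan advances one character at a time, so overlapping
# occurrences are all reported.
assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]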
| 67 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
test_graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph: list, s: int, t: int, parent: list) -> bool:
    '''Breadth-first search from s; fills `parent` and reports whether t is reachable.'''
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph: list, source: int, sink: int) -> list:
    '''Ford-Fulkerson max flow; returns the saturated edges forming the minimum cut.'''
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
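

# Added worked example: by max-flow/min-cut duality, the value of the maximum
# flow equals the capacity of the minimum cut; for this classic graph both are 23.
# `_max_flow` is an added helper reusing `bfs`; it works on its own fresh copy of
# the capacities (the call above mutates `test_graph` in place).
def _max_flow(graph: list, source: int, sink: int) -> int:
    graph = [row[:] for row in graph]
    parent = [-1] * len(graph)
    flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("inf")
        v = sink
        while v != source:
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return flow


_original_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert _max_flow(_original_graph, 0, 5) == 23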
| 68 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    '''Read a boolean flag from the environment, falling back to `default`.'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    '''Decorator that skips a test unconditionally.'''
    return unittest.skip('Test was skipped')(test_case)


def slow(test_case):
    '''Decorator marking a test as slow; skipped unless RUN_SLOW=yes is set.'''
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), F"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, 'test requires at least one tracker to be available and for `comet_ml` to not be installed', )(test_case)
class TempDirTestCase(unittest.TestCase):
    '''A TestCase that keeps one temporary directory for the whole class,
    wiping its contents before each test when `clear_on_setup` is True.'''

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    '''A TestCase that resets the accelerator state singletons between tests.'''

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    '''A TestCase that starts a set of mocks and stops them automatically.'''

    def setUp(self, mocks=None):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    '''Check that `tensor` has the same value on every process after a gather.'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:'))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    '''Run `command` with subprocess.check_output; raise SubprocessCallException on failure.'''
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
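

# Hedged usage sketch (added example, not part of the original file): the
# decorators above compose like plain unittest skips, and run_command mirrors
# subprocess.check_output but raises SubprocessCallException on failure.
class _ExampleTest(unittest.TestCase):
    @require_cuda
    @slow
    def test_heavy_gpu_path(self):
        self.assertTrue(torch.cuda.is_available())


if __name__ == "__main__":
    print(run_command(["echo", "hello"], return_stdout=True))  # -> "hello\n"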
| 29 | 0 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__UpperCamelCase = logging.getLogger(__name__)
__UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    '''Attach whole-word-masking reference indices (one JSON list per line) to a dataset.'''
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict)
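

# Hedged illustration (added, not part of the original script): the expected
# shape of a ref file -- one JSON list of whole-word sub-token indices per line,
# consumed above and later by DataCollatorForWholeWordMask, e.g.:
#
#   [2, 3]
#   [1, 4, 5]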
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]', )
            datasets['train'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]', )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    if model_args.config_overrides is not None:
        logger.info(f'Overriding config: {model_args.config_overrides}')
        config.update_from_string(model_args.config_overrides)
        logger.info(f'New config: {config}')

    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.')

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]

    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['train'] if training_args.do_train else None, eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
                logger.info('***** Train results *****')
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f' {key} = {value}')
                    writer.write(f'{key} = {value}\n')

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in sorted(results.items()):
                    logger.info(f' {key} = {value}')
                    writer.write(f'{key} = {value}\n')

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 69 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    '''Generate a pair of dummy DataLoaders for y = a * x + b with noise.'''

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''Train for `num_epochs`, returning one random draw per epoch to track RNG state.'''
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    '''Simple linear model: y = a * x + b.'''

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
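

# Hedged sketch (added example, not part of the original tests): the save/load
# pattern the tests below exercise, reduced to its core; the directory path is a
# placeholder.
def _checkpoint_roundtrip(ckpt_dir="/tmp/accelerate_ckpt"):
    accelerator = Accelerator()
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state(ckpt_dir)  # writes model, optimizer and RNG states
    accelerator.load_state(ckpt_dir)  # restores them in place
    return model, optimizer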
class CheckpointTest(unittest.TestCase):
    '''Tests for Accelerator.save_state / load_state round-trips.'''
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            # plain tensors cannot be registered; only the stateful objects are accepted
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states: with total_limit=2 only the two most recent survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
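
# A minimal, self-contained sketch (not part of the original test suite) of the
# save/resume pattern the tests above exercise; the helper name and its
# `checkpoint_dir` argument are illustrative assumptions, not from the original.
def _demo_checkpoint_roundtrip(checkpoint_dir):
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    accelerator = Accelerator(
        project_dir=checkpoint_dir,
        project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state()  # writes checkpoint_dir/checkpoints/checkpoint_0
    # ... train for a while, then restore model/optimizer/RNG state in place:
    accelerator.load_state(os.path.join(checkpoint_dir, "checkpoints", "checkpoint_0"))
    return model, optimizer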
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
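
# The pattern above — build an argv list, patch sys.argv, call the script's
# main() in-process — is how each example CLI is exercised; a minimal sketch
# (the flag values below are illustrative, not from the original):
#
#     testargs = "run_glue.py --output_dir /tmp/out --seed 42".split()
#     with patch.object(sys, "argv", testargs):
#         run_flax_glue.main()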
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
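
# A generic sketch of the same deprecation-alias pattern, with hypothetical
# OldName/NewName classes; useful when renaming a public class without
# breaking existing imports:
#
#     class OldName(NewName):
#         def __init__(self, *args, **kwargs):
#             warnings.warn("OldName is deprecated; use NewName instead.", FutureWarning)
#             super().__init__(*args, **kwargs)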
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)
            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)
            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)
            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)
            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)
            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)
            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)
            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
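
# A minimal sketch of the PT<->TF round-trip these tests rely on; the temporary
# directory below is illustrative, while SMALL_MODEL_IDENTIFIER is the tiny
# test fixture already imported above:
#
#     tf_model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
#     tf_model.save_pretrained("/tmp/tiny-bert-tf")
#     pt_model = AutoModelWithLMHead.from_pretrained("/tmp/tiny-bert-tf", from_tf=True)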
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow); keep `cur` at the start for the comparison pass
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
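
# The functions above assume a singly linked node type with `val`/`next`
# attributes but never define one; a minimal sketch (the node class and
# builder are illustrative additions, not from the original):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def _build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


# Example: all three checks agree that 1 -> 2 -> 1 is a palindrome, e.g.
# assert is_palindrome_stack(_build_list([1, 2, 1]))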
"""simple docstring"""
from __future__ import annotations
import math
def snake_case_ ( A_ : float, A_ : int ):
'''simple docstring'''
_lowerCamelCase : Tuple = u
for i in range(1, A_ ):
_lowerCamelCase : int = temp * (u - i)
return temp
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = int(input('''enter the numbers of values: ''' ) )
_lowerCamelCase : list[list[float]] = []
for _ in range(A_ ):
y.append([] )
for i in range(A_ ):
for j in range(A_ ):
y[i].append(A_ )
_lowerCamelCase : Optional[int] = 0
print('''enter the values of parameters in a list: ''' )
_lowerCamelCase : Optional[int] = list(map(A_, input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(A_ ):
_lowerCamelCase : Dict = float(input() )
_lowerCamelCase : Tuple = int(input('''enter the value to interpolate: ''' ) )
_lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, A_ ):
for j in range(n - i ):
_lowerCamelCase : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1]
_lowerCamelCase : Any = y[0][0]
for i in range(1, A_ ):
summ += (ucal(A_, A_ ) * y[0][i]) / math.factorial(A_ )
print(F'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
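
# A non-interactive sketch of the same computation with fixed data; the helper
# name and the sample points are illustrative, not from the original:
def _demo_forward_interpolation() -> float:
    x = [1891, 1901, 1911, 1921]
    y0 = [46.0, 66.0, 81.0, 93.0]
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = y0[i]
    # forward-difference table: column i holds the i-th differences
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    value = 1895
    u = (value - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * table[0][i]) / math.factorial(i)
    return summ  # ~= 54.73 for this data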
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
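
# _LazyModule defers the heavy torch imports until an attribute is first
# touched; a minimal sketch of the same idea using PEP 562 module-level
# __getattr__ (the body below is illustrative, not the library's code):
#
#     def __getattr__(name):
#         if name == "ViTMSNModel":
#             from .modeling_vit_msn import ViTMSNModel
#             return ViTMSNModel
#         raise AttributeError(name)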
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
a ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Fast BERT tokenizer backed by the HuggingFace *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the backend normalizer disagrees with the requested options, rebuild it.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
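
# A quick sketch of the special-token layout the two helpers above produce
# (requires downloading a checkpoint, so it is illustrative only; the token
# ids are made up):
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     ids = tok.build_inputs_with_special_tokens([7592], [2088])
#     # -> [CLS] 7592 [SEP] 2088 [SEP]
#     types = tok.create_token_type_ids_from_sequences([7592], [2088])
#     # -> [0, 0, 0, 1, 1]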
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
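
# Sketch of how a pin table like this is typically consumed: look up each
# package's pinned specifier when declaring install_requires/extras. The
# helper below is an illustrative assumption, not part of the original table:
def _deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]


# e.g. _deps_list("torch", "numpy") -> ['torch>=1.9,!=1.12.0', 'numpy>=1.17']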
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nllb_moe'''] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of `AutoencoderKL.encode`, carrying the posterior distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=4,
        norm_num_groups=32,
        sample_size=32,
        scaling_factor=0.18215,
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling=True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x, return_dict=True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x, return_dict=True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z, return_dict=True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(
        self, sample, sample_posterior=False, return_dict=True, generator=None
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
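
# A tiny numeric sketch of the linear crossfade blend_h performs along the
# seam between two tiles (the tensors below are illustrative):
#
#     a = torch.ones(1, 1, 1, 4)    # left tile, all ones
#     b = torch.zeros(1, 1, 1, 4)   # right tile, all zeros
#     # with blend_extent=4, b[..., x] = a[..., -4 + x] * (1 - x/4) + b[..., x] * (x/4)
#     # so the blended row of b becomes [1.0, 0.75, 0.5, 0.25]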
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def lowercase__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer''' ) )
def lowercase__ ( self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer''' ) )
def lowercase__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer''' ) )
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
        dataset =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
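    # note: the dummy docs use constant embeddings (all ones for doc "0", all twos
    # for doc "1"), so inner-product retrieval is deterministic: a query of
    # np.ones(8) scores 8 against doc "0" and 16 against doc "1", which is exactly
    # what the retrieve tests below assert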
def lowercase__ ( self ):
"""simple docstring"""
        dataset =self.get_dummy_dataset()
        config =RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            mock_load_dataset.return_value =dataset
            retriever =RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
return retriever
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_dataset()
lowerCamelCase_ =RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''custom''', )
if from_disk:
lowerCamelCase_ =os.path.join(self.tmpdirname, '''dataset''' )
lowerCamelCase_ =os.path.join(self.tmpdirname, '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname, '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname, '''dataset''' ) )
del dataset
lowerCamelCase_ =RagRetriever(
lowerCAmelCase, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
else:
lowerCamelCase_ =RagRetriever(
lowerCAmelCase, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, lowerCAmelCase ), )
return retriever
def lowercase__ ( self ):
"""simple docstring"""
        dataset =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ =os.path.join(self.tmpdirname, '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''', index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''], open(index_file_name + '''.index_meta.dpr''', '''wb''' ) )
lowerCamelCase_ =os.path.join(self.tmpdirname, '''psgs_w100.tsv.pkl''' )
lowerCamelCase_ ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(lowerCAmelCase, open(lowerCAmelCase, '''wb''' ) )
lowerCamelCase_ =RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''legacy''', index_path=self.tmpdirname, )
lowerCamelCase_ =RagRetriever(
lowerCAmelCase, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                mock_load_dataset.return_value =self.get_dummy_dataset()
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0], '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0], '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_legacy_index_retriever()
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowerCAmelCase ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ), lowerCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0], '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0], '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =RagRetriever.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever.retrieve(lowerCAmelCase, n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ =[[5, 7], [10, 11]]
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever(lowerCAmelCase, lowerCAmelCase, prefix=retriever.config.generator.prefix, n_docs=lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, np.ndarray )
lowerCamelCase_ =retriever(
lowerCAmelCase, lowerCAmelCase, prefix=retriever.config.generator.prefix, n_docs=lowerCAmelCase, return_tensors='''pt''', )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dpr_ctx_encoder_tokenizer()
lowerCamelCase_ =1
lowerCamelCase_ =self.get_dummy_custom_hf_index_retriever(from_disk=lowerCAmelCase )
retriever.set_ctx_encoder_tokenizer(lowerCAmelCase )
lowerCamelCase_ =[[5, 7], [10, 11]]
lowerCamelCase_ =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
lowerCamelCase_ =retriever(lowerCAmelCase, lowerCAmelCase, prefix=retriever.config.generator.prefix, n_docs=lowerCAmelCase )
        self.assertEqual(
            len(lowerCAmelCase ), 6 ) # check that the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ), lowerCAmelCase ) # check that the doc-token-related keys are present in the output dictionary
| 75 |
def lowercase__ ( a : int , b : int ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
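# Worked example (illustrative): for 25 (0b11001) and 32 (0b100000) both strings
# are zero-padded to length 6, so the character-wise AND of "011001" and "100000"
# gives "0b000000"; for 25 and 17 (0b10001) it gives "0b10001".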
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
a_ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
    drop_labels =None
    drop_metadata =None
class AudioFolder ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio' , label_column='label' )
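# Usage sketch (hedged: the directory layout is an assumption, not part of this
# file): load_dataset("audiofolder", data_dir="path/with/one-subfolder-per-label")
# yields an audio-classification dataset whose labels come from the folder names.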
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
    def __UpperCAmelCase ( self , _UpperCamelCase , _token_ids_b=None ) -> List[str]:
        # builds [CLS] A [SEP], appending B [SEP] when a second segment is given
        output = [self.cls_token_id] + _UpperCamelCase + [self.sep_token_id]
        if _token_ids_b:
            output += _token_ids_b + [self.sep_token_id]
        return output
    def __UpperCAmelCase ( self , _UpperCamelCase , _token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if _token_ids_b is None:
            return len(cls + _UpperCamelCase + sep ) * [0]
        return len(cls + _UpperCamelCase + sep ) * [0] + len(_token_ids_b + sep ) * [1]
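    # e.g. for a 3-token sequence A and a 2-token sequence B this returns
    # [0, 0, 0, 0, 0] + [1, 1, 1]: zeros cover "[CLS] A [SEP]", ones cover "B [SEP]"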
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
| 29 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_UpperCamelCase : Any = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
lowerCamelCase__ : Optional[str] = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."})
lowerCamelCase__ : Optional[str] = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
lowerCamelCase__ : int = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase__ : bool = field(
default=_a , metadata={"help": "Overwrite the cached preprocessed datasets or not."})
lowerCamelCase__ : bool = field(
default=_a , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
lowerCamelCase__ : Optional[str] = field(
default=_a , metadata={"help": "A csv or a json file containing the training data."})
lowerCamelCase__ : Optional[str] = field(
default=_a , metadata={"help": "A csv or a json file containing the validation data."})
lowerCamelCase__ : Optional[str] = field(default=_a , metadata={"help": "A csv or a json file containing the test data."})
def _UpperCAmelCase ( self ) -> Optional[Any]:
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
lowercase__ : Optional[int] = self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowercase__ : Optional[Any] = self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments :
lowerCamelCase__ : str = field(
default=_a , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
lowerCamelCase__ : Optional[str] = field(
default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
lowerCamelCase__ : Optional[str] = field(
default=_a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
lowerCamelCase__ : Optional[str] = field(
default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase__ : bool = field(
default=_a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase__ : bool = field(
default=_a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def a_ ( ):
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase__ : int = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
datasets.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowercase__ : Tuple = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowercase__ : List[Any] = data_args.train_file.split('.' )[-1]
lowercase__ : Optional[int] = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowercase__ : List[str] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
lowercase__ : List[Any] = load_dataset('csv' , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowercase__ : List[Any] = load_dataset('json' , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase__ : int = raw_datasets['train'].features['label'].names
lowercase__ : Union[str, Any] = len(_lowerCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowercase__ : List[str] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_lowerCAmelCase , )
lowercase__ : Optional[int] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowercase__ : Tuple = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase__ : Optional[int] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowercase__ : str = {'Refused': 0, 'Entailed': 1}
lowercase__ : Dict = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowercase__ : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_lowerCAmelCase : Optional[int] ):
# Tokenize the texts
def _convert_table_text_to_pandas(_lowerCAmelCase : Union[str, Any] ):
lowercase__ : Optional[int] = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
lowercase__ : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
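        # e.g. a table_text of "col1#col2\nfoo#1\nbar#2" becomes a two-row DataFrame
        # with columns ["col1", "col2"] (rows split on "\n", cells split on "#")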
lowercase__ : Tuple = examples['statement']
lowercase__ : int = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
lowercase__ : Optional[int] = tokenizer(_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase )
lowercase__ : str = examples['label']
return result
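    # each example is tokenized as a (table, statement) pair: TapexTokenizer
    # linearizes the pandas table together with the statement into one sequence,
    # truncated/padded to max_seq_length, with the gold label carried along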
with training_args.main_process_first(desc='dataset map pre-processing' ):
lowercase__ : str = raw_datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
lowercase__ : int = raw_datasets['train']
if data_args.max_train_samples is not None:
lowercase__ : int = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
lowercase__ : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
lowercase__ : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
lowercase__ : List[Any] = raw_datasets['test']
if data_args.max_predict_samples is not None:
lowercase__ : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_lowerCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCAmelCase : EvalPrediction ):
lowercase__ : Optional[int] = p.predictions[0] if isinstance(p.predictions , _lowerCAmelCase ) else p.predictions
lowercase__ : List[str] = np.argmax(_lowerCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
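    # e.g. logits of shape (num_examples, 2) are argmax-ed to class ids and compared
    # element-wise with p.label_ids, yielding a single accuracy float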
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase__ : List[str] = default_data_collator
elif training_args.fpaa:
lowercase__ : Optional[Any] = DataCollatorWithPadding(_lowerCAmelCase , pad_to_multiple_of=8 )
else:
lowercase__ : str = None
# Initialize our Trainer
lowercase__ : int = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , )
# Training
if training_args.do_train:
lowercase__ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : Optional[int] = last_checkpoint
lowercase__ : Tuple = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
lowercase__ : Tuple = train_result.metrics
lowercase__ : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
lowercase__ : Any = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _lowerCAmelCase )
trainer.save_metrics('train' , _lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase__ : Union[str, Any] = trainer.evaluate(eval_dataset=_lowerCAmelCase )
lowercase__ : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase )
lowercase__ : List[str] = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics('eval' , _lowerCAmelCase )
trainer.save_metrics('eval' , _lowerCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Remove the `label` column because it contains -1 and the Trainer won't like that.
lowercase__ : int = predict_dataset.remove_columns('label' )
lowercase__ : Any = trainer.predict(_lowerCAmelCase , metric_key_prefix='predict' ).predictions
lowercase__ : str = np.argmax(_lowerCAmelCase , axis=1 )
lowercase__ : Optional[Any] = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_lowerCAmelCase , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_lowerCAmelCase ):
lowercase__ : Optional[int] = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
lowercase__ : Union[str, Any] = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
def _mp_fn( _lowerCAmelCase : List[Any] ):
    '''simple docstring'''
    # entry point for xla_spawn (TPUs); delegates to the main routine `a_` above
    a_()
if __name__ == "__main__":
    a_()
| 77 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (PretrainedConfig ):
'''simple docstring'''
    model_type = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_metaad_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
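        # Instantiation sketch (illustrative): EfficientFormerConfig() reproduces the
        # defaults above; individual fields can be overridden by keyword, e.g.
        # EfficientFormerConfig(depths=[4, 4, 12, 8]) for a hypothetical deeper variant.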
| 29 | 0 |
"""simple docstring"""
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def _lowerCAmelCase ( lowercase_ ):
    assert type(lowercase_ ) in (int, float) and lowercase_ == int(lowercase_ )
    decimal = int(lowercase_ )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
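# Worked example (illustrative): 255 yields remainders 15, 15 -> "ff" -> "0xff";
# a negative input such as -26 is sign-flipped to 26 -> "0x1a", and the "-" is
# prepended afterwards, giving "-0x1a".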
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput (BaseOutput ):
    '''simple docstring'''
    images : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (DiffusionPipeline ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
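    # note: under classifier-free guidance the returned tensor stacks the zeroed
    # (unconditional) embeddings ahead of the image embeddings, so a single forward
    # pass of the prior serves both halves of the guidance update below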
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
        # YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
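                # at guidance_scale == 1.0 this reduces to the conditional prediction;
                # larger scales push the update further toward the image-conditioned branch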
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase_ = logging.get_logger(__name__)
class _UpperCAmelCase ( SequenceFeatureExtractor ):
"""simple docstring"""
    model_input_names = ['''audio_values''', '''audio_mask''']
def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[Any]=2048 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Optional[Any]=[16, 16] , __UpperCAmelCase : int=128 , __UpperCAmelCase : Union[str, Any]=44100 , __UpperCAmelCase : Optional[int]=86 , __UpperCAmelCase : List[Any]=2048 , __UpperCAmelCase : Dict=0.0 , **__UpperCAmelCase : List[str] , ):
'''simple docstring'''
super().__init__(
feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase , )
_A = spectrogram_length
_A = num_channels
_A = patch_size
_A = feature_size // self.patch_size[1]
_A = n_fft
_A = sampling_rate // hop_length_to_sampling_rate
_A = sampling_rate
_A = padding_value
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__UpperCAmelCase , norm="slaney" , mel_scale="slaney" , ).T
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : np.array ):
'''simple docstring'''
_A = spectrogram(
__UpperCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
_A = log_spec[:, :-1]
_A = log_spec - 20.0
_A = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
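    # note: after the -20 dB shift and the /40 rescale, values are clipped to
    # [-2.0, 0.0] and shifted by +1, so the returned log-mel features lie in [-1.0, 1.0]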
def __call__( self : List[Any] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , **__UpperCAmelCase : Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_A = is_batched_numpy or (
isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
_A = np.asarray(__UpperCAmelCase , dtype=np.floataa )
elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_A = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __UpperCAmelCase ):
_A = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_A = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_A = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_A = np.array(__UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
_A = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_A = np.ones([len(__UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_A = padded_audio_features * self.padding_value
for i in range(len(__UpperCAmelCase ) ):
_A = audio_features[i]
_A = feature
# return as BatchFeature
if return_attention_mask:
_A = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
_A = {"audio_values": padded_audio_features}
_A = BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
return encoded_inputs
| 79 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowercase_ ( Pipeline ):
def __init__( self , *a , **a ):
super().__init__(*a , **a )
requires_backends(self , "vision" )
self.check_model_type(a )
def __call__( self , a , **a ):
return super().__call__(a , **a )
    def _sanitize_parameters( self , **a ):
        return {}, {}, {}
    def preprocess( self , a ):
        image = load_image(a )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , a ):
        model_outputs = self.model(**a )
        return model_outputs
    def postprocess( self , a ):
        predicted_depth = a.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output )).astype("uint8" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
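    # Minimal usage sketch (hedged: the image path is a placeholder, not part of
    # this file):
    #   from transformers import pipeline
    #   estimator = pipeline("depth-estimation")
    #   out = estimator("cat.png")
    #   out["depth"]            # uint8 PIL.Image visualization
    #   out["predicted_depth"]  # raw torch.Tensor from the model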
| 80 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __A ( TokenizerTesterMixin, unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
a =XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a ='''<s>'''
a =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(__A ) , 1006 )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =XLNetTokenizer(__A , keep_accents=__A )
a =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
a =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a =tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
a =tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =XLNetTokenizer(__A , do_lower_case=__A )
a =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =XLNetTokenizer(__A , do_lower_case=__A )
a =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
a =tokenizer.encode('''sequence builders''' , add_special_tokens=__A )
a =tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A )
a =tokenizer.build_inputs_with_special_tokens(__A )
a =tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
# fmt: off
a ={'''input_ids''': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , ) | 81 |
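
# The two slow-test assertions above encode XLNet's sequence layout: unlike
# BERT, the special tokens go at the *end* (sep_token_id=4, cls_token_id=3).
# A self-contained sketch of that build_inputs_with_special_tokens logic:
SEP_ID, CLS_ID = 4, 3


def xlnet_build_inputs(token_ids_a, token_ids_b=None):
    # single sequence:   A <sep> <cls>
    # pair of sequences: A <sep> B <sep> <cls>
    if token_ids_b is None:
        return token_ids_a + [SEP_ID, CLS_ID]
    return token_ids_a + [SEP_ID] + token_ids_b + [SEP_ID, CLS_ID]


assert xlnet_build_inputs([10, 11]) == [10, 11, 4, 3]
assert xlnet_build_inputs([10, 11], [20, 21]) == [10, 11, 4, 20, 21, 4, 3]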
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
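
# A self-contained sketch of the load path the wrapper above builds on: create
# an onnxruntime session and run it on numpy inputs. "model.onnx" is a
# placeholder path, and the all-zeros float32 input is an assumption; dynamic
# dimensions (reported as strings or None) are pinned to 1 for the dummy tensor.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
first_input = session.get_inputs()[0]
dummy_shape = [d if isinstance(d, int) else 1 for d in first_input.shape]
dummy = np.zeros(dummy_shape, dtype=np.float32)
outputs = session.run(None, {first_input.name: dummy})  # None -> all outputs
print([o.shape for o in outputs])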
| 29 | 0 |
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_lowerCAmelCase = remove_duplicates(key.upper() )
_lowerCAmelCase = len(snake_case )
# First fill cipher with key characters
_lowerCAmelCase = {alphabet[i]: char for i, char in enumerate(snake_case )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(snake_case ) , 26 ):
_lowerCAmelCase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_lowerCAmelCase = alphabet[i - offset]
_lowerCAmelCase = char
return cipher_alphabet
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
return "".join(cipher_map.get(snake_case , snake_case ) for ch in message.upper() )
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(snake_case , snake_case ) for ch in message.upper() )
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = input("""Enter message to encode or decode: """ ).strip()
_lowerCAmelCase = input("""Enter keyword: """ ).strip()
_lowerCAmelCase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
_lowerCAmelCase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
_lowerCAmelCase = create_cipher_map(snake_case )
print(func(snake_case , snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
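
# Worked example of the keyword-cipher idea above, written self-contained
# (the corrupted definitions above all share one name, so they cannot be
# called directly). This simplified variant fills the rest of the cipher
# alphabet in plain A-Z order rather than with the offset logic above.
def keyword_cipher_map(key):
    alphabet = [chr(ord("A") + i) for i in range(26)]
    seen = []
    for ch in key.upper():
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    rest = [c for c in alphabet if c not in seen]
    return dict(zip(alphabet, seen + rest))


cmap = keyword_cipher_map("MARVEL")  # A->M, B->A, C->R, D->V, E->E, ...
encoded = "".join(cmap.get(ch, ch) for ch in "HELLO")
decoded = "".join({v: k for k, v in cmap.items()}.get(ch, ch) for ch in encoded)
assert (encoded, decoded) == ("CEHHK", "HELLO")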
| 82 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
import bza
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lza.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
import bza
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
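
# How a test consumes the session-scoped fixtures above: pytest injects each
# fixture by parameter name. A hedged sketch (the test body is illustrative,
# but the expected values follow from the DATA rows written by `csv_path`):
def test_csv_fixture_contents(csv_path):
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert len(rows) == 4
    assert rows[0] == {"col_1": "0", "col_2": "0", "col_3": "0.0"}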
| 29 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class lowercase__ :
def __init__( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : str = {}
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : List[Any] = {}
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : float ):
'''simple docstring'''
if nodea not in self.connections:
self.add_node(lowerCamelCase__ )
if nodea not in self.connections:
self.add_node(lowerCamelCase__ )
_UpperCamelCase : str = probability
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return list(self.connections )
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = 0
_UpperCamelCase : List[Any] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Dict = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = Counter(graph.get_nodes() )
_UpperCamelCase : Dict = start
for _ in range(UpperCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = graph.transition(UpperCAmelCase_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
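
# The random walk above boils down to: sample the next node from the outgoing
# probability table, `steps` times, and count visits. A self-contained sketch
# (the mangled shared names above are not callable as written):
import random
from collections import Counter

transitions = {"a": {"a": 0.9, "b": 0.1}, "b": {"b": 0.6, "a": 0.4}}


def walk(start, steps):
    visited = Counter({node: 1 for node in transitions})  # mirrors Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = random.choices(list(transitions[node]),
                              weights=list(transitions[node].values()))[0]
        visited[node] += 1
    return visited


print(walk("a", 5000))  # stationary split is 0.8/0.2, so "a" gets ~4x the visits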
| 83 |
from __future__ import annotations
def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
UpperCAmelCase_ : str = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase_ : Optional[Any] = []
for position in positions:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__snake_case )
return permissible_positions
def lowercase__ ( __snake_case : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
if is_complete(__snake_case ):
return True
for position in get_valid_pos(__snake_case , len(__snake_case ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = position
if board[y][x] == 0:
UpperCAmelCase_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
return True
UpperCAmelCase_ : List[Any] = 0
return False
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
for i in range(__snake_case ):
for j in range(__snake_case ):
UpperCAmelCase_ : Optional[Any] = 1
if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
return board
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : List[str] = F"Open Kight Tour cannot be performed on a board of size {n}"
raise ValueError(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
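
# Self-contained re-implementation sketch of the backtracking search above
# (the four definitions above share one mangled name, so the recursive calls
# would not resolve as written). Open knight's tours exist for n = 1 and
# n >= 5; the version above raises ValueError for the sizes in between.
def knight_tour(n):
    moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    board = [[0] * n for _ in range(n)]

    def solve(y, x, step):
        board[y][x] = step
        if step == n * n:
            return True
        for dy, dx in moves:
            ny, nx = y + dy, x + dx
            if 0 <= ny < n and 0 <= nx < n and board[ny][nx] == 0 and solve(ny, nx, step + 1):
                return True
        board[y][x] = 0  # backtrack
        return False

    for i in range(n):
        for j in range(n):
            if solve(i, j, 1):
                return board
    return None


for row in knight_tour(5) or []:
    print(row)  # each cell holds its 1-based visit order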
| 29 | 0 |
"""simple docstring"""
__UpperCAmelCase = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCAmelCase = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _snake_case ( lowercase__ : dict[int, list[int]] , lowercase__ : int , lowercase__ : list[bool] ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = True
lowerCAmelCase_ :Any = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(lowercase__ , lowercase__ , lowercase__ )
order.append(lowercase__ )
return order
def _snake_case ( lowercase__ : dict[int, list[int]] , lowercase__ : int , lowercase__ : list[bool] ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = True
lowerCAmelCase_ :Optional[Any] = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(lowercase__ , lowercase__ , lowercase__ )
return component
def _snake_case ( lowercase__ : dict[int, list[int]] ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase_ :str = len(lowercase__ ) * [False]
lowerCAmelCase_ :dict[int, list[int]] = {vert: [] for vert in range(len(lowercase__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(lowercase__ )
lowerCAmelCase_ :Any = []
for i, was_visited in enumerate(lowercase__ ):
if not was_visited:
order += topology_sort(lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[Any] = len(lowercase__ ) * [False]
for i in range(len(lowercase__ ) ):
lowerCAmelCase_ :str = order[len(lowercase__ ) - i - 1]
if not visited[vert]:
lowerCAmelCase_ :List[Any] = find_components(lowercase__ , lowercase__ , lowercase__ )
components_list.append(lowercase__ )
return components_list
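
# Self-contained Kosaraju sketch matching the three steps above -- finish-time
# order on the graph, then DFS over the reversed graph -- since the mangled
# shared names above prevent calling the functions directly. On the sample
# graphs at the top of this file it finds {0,1,2},{3},{4} and {0,1,2},{3,4,5}.
def kosaraju_scc(graph):
    reversed_graph = {v: [] for v in graph}
    for v, neighbours in graph.items():
        for w in neighbours:
            reversed_graph[w].append(v)

    visited, order = set(), []

    def dfs_order(v):
        visited.add(v)
        for w in graph[v]:
            if w not in visited:
                dfs_order(w)
        order.append(v)  # post-order: v finishes after its descendants

    for v in graph:
        if v not in visited:
            dfs_order(v)

    visited, components = set(), []

    def dfs_collect(v, component):
        visited.add(v)
        component.append(v)
        for w in reversed_graph[v]:
            if w not in visited:
                dfs_collect(w, component)

    for v in reversed(order):  # highest finish time first
        if v not in visited:
            components.append([])
            dfs_collect(v, components[-1])
    return components


print(kosaraju_scc({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}))
print(kosaraju_scc({0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}))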
| 84 |
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase_ : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , __snake_case ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
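
# Self-contained sanity check for the count above: the number of integer
# partitions of m. p(5) = 7, since 5 = 5 = 4+1 = 3+2 = 3+1+1 = 2+2+1
# = 2+1+1+1 = 1+1+1+1+1; the memoized version reads the same value from
# memo[m][m - 1].
def partitions_brute_force(m, max_part=None):
    max_part = m if max_part is None else max_part
    if m == 0:
        return 1
    # choose the largest part p, then partition the remainder with parts <= p
    return sum(partitions_brute_force(m - p, p) for p in range(1, min(m, max_part) + 1))


assert partitions_brute_force(5) == 7
assert partitions_brute_force(7) == 15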
| 29 | 0 |
'''simple docstring'''
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = len(snake_case )
while cur > 1:
# Find the maximum number in arr
snake_case_ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
snake_case_ = arr[mi::-1] + arr[mi + 1 : len(snake_case )]
# Reverse whole list
snake_case_ = arr[cur - 1 :: -1] + arr[cur : len(snake_case )]
cur -= 1
return arr
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = input("Enter numbers separated by a comma:\n").strip()
_SCREAMING_SNAKE_CASE : int = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
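
# A short, self-contained check of the flip logic above: each round brings the
# current maximum to the front, then flips it into its final slot.
def flips_check(arr):
    arr = list(arr)
    for cur in range(len(arr), 1, -1):
        mi = arr.index(max(arr[:cur]))
        arr = arr[mi::-1] + arr[mi + 1 :]     # reverse the prefix ending at the max
        arr = arr[cur - 1 :: -1] + arr[cur:]  # reverse the first `cur` elements
        # after this round, arr[cur - 1 :] is sorted and never touched again
    return arr


assert flips_check([3, 2, 4, 1]) == [1, 2, 3, 4]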
| 85 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCamelCase (Pipeline ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 0 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase__ = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCamelCase__ = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = create_model(
'HTSAT-tiny' , 'roberta' , _UpperCamelCase , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=_UpperCamelCase , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[int] = {}
__lowerCAmelCase : str = r'.*sequential.(\d+).*'
__lowerCAmelCase : int = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowerCAmelCase : Union[str, Any] = key.replace(_UpperCamelCase , _UpperCamelCase )
if re.match(_UpperCamelCase , _UpperCamelCase ):
# replace sequential layers with list
__lowerCAmelCase : List[str] = re.match(_UpperCamelCase , _UpperCamelCase ).group(1 )
__lowerCAmelCase : Tuple = key.replace(F"sequential.{sequential_layer}." , F"layers.{int(_UpperCamelCase )//3}.linear." )
elif re.match(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Dict = int(re.match(_UpperCamelCase , _UpperCamelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__lowerCAmelCase : Tuple = 1 if projecton_layer == 0 else 2
__lowerCAmelCase : List[str] = key.replace(F"_projection.{projecton_layer}." , F"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
__lowerCAmelCase : int = value
__lowerCAmelCase : Union[str, Any] = mixed_qkv.size(0 ) // 3
__lowerCAmelCase : Optional[int] = mixed_qkv[:qkv_dim]
__lowerCAmelCase : Optional[Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
__lowerCAmelCase : Optional[int] = mixed_qkv[qkv_dim * 2 :]
__lowerCAmelCase : Optional[Any] = query_layer
__lowerCAmelCase : Dict = key_layer
__lowerCAmelCase : str = value_layer
else:
__lowerCAmelCase : Dict = value
return model_state_dict
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = init_clap(_UpperCamelCase , enable_fusion=_UpperCamelCase )
clap_model.eval()
__lowerCAmelCase : Optional[int] = clap_model.state_dict()
__lowerCAmelCase : Dict = rename_state_dict(_UpperCamelCase )
__lowerCAmelCase : List[str] = ClapConfig()
__lowerCAmelCase : List[str] = enable_fusion
__lowerCAmelCase : Dict = ClapModel(_UpperCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
transformers_config.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCamelCase__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 86 |
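
# Hedged invocation sketch for the conversion script above; the script file
# name and both paths are placeholders, while the flags mirror the argparse
# arguments defined directly above.
#
#   python convert_clap_original_checkpoint_to_hf.py \
#       --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion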
import os
# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
UpperCAmelCase_ : Union[str, Any] = ''
with open(__snake_case ) as f:
UpperCAmelCase_ : List[Any] = f.readline()
UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
UpperCAmelCase_ : Optional[int] = [
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
if __name__ == "__main__":
print(solution())
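
# Worked example of the word-value test above: letters map to A=1 .. Z=26
# (hence `ord(x) - 64`), and a word counts if its value is a triangular number.
word_value = sum(ord(ch) - 64 for ch in "SKY")  # 19 + 11 + 25
assert word_value == 55 == 10 * (10 + 1) // 2   # t_10, so "SKY" is a triangle word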
| 29 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase_ ( _lowerCamelCase : List[str]):
if not is_accelerate_available():
return method
lowercase__ : Optional[int] = version.parse(accelerate.__version__).base_version
if version.parse(_lowerCamelCase) < version.parse("0.17.0"):
return method
def wrapper(self : Optional[int] , *_lowerCamelCase : str , **_lowerCamelCase : Optional[Any]):
if hasattr(self , "_hf_hook") and hasattr(self._hf_hook , "pre_forward"):
self._hf_hook.pre_forward(self)
return method(self , *_lowerCamelCase , **_lowerCamelCase)
return wrapper
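
# Usage sketch for the decorator above (known in diffusers as
# apply_forward_hook): it wraps a method so that an attached accelerate hook's
# pre_forward fires before the method body, which matters when weights are
# CPU-offloaded. The alias below binds the mangled name defined above.
import torch

apply_forward_hook = lowercase_


class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    @apply_forward_hook
    def encode(self, x):
        return self.linear(x)


print(TinyModel().encode(torch.randn(1, 4)).shape)  # torch.Size([1, 4])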
| 87 |
| 29 | 0 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__lowerCAmelCase : str = 637_8137.0
__lowerCAmelCase : Optional[Any] = 635_6752.31_4245
__lowerCAmelCase : List[str] = 6378137
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__magic_name__ = atan((1 - flattening) * tan(radians(A_ ) ) )
__magic_name__ = atan((1 - flattening) * tan(radians(A_ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__magic_name__ = haversine_distance(A_, A_, A_, A_ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__magic_name__ = (b_lata + b_lata) / 2
__magic_name__ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__magic_name__ = (sin(A_ ) ** 2) * (cos(A_ ) ** 2)
__magic_name__ = cos(sigma / 2 ) ** 2
__magic_name__ = (sigma - sin(A_ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__magic_name__ = (cos(A_ ) ** 2) * (sin(A_ ) ** 2)
__magic_name__ = sin(sigma / 2 ) ** 2
__magic_name__ = (sigma + sin(A_ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
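
# Usage sketch for the ellipsoidal distance above (in TheAlgorithms the entry
# point is called lamberts_ellipsoidal_distance, which we assume is the
# intended name of the mangled def): it takes two (lat, lon) pairs in degrees
# and returns metres. The coordinates below are assumptions for illustration,
# and ~254 km is a rough great-circle cross-check, not an exact reference.
#
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   YOSEMITE = (37.864742, -119.537521)
#   print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))  # ~254_000 m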
| 88 |
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
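
# Self-contained check mirroring the bidirectional passes above: large values
# bubble right and small values bubble left within the same sweep, and
# `swapped` stops the loop as soon as a full back-and-forth pass is clean.
def shaker_sort(items):
    items = list(items)
    for i in range(len(items) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # right-to-left pass
            if items[j] < items[j - 1]:
                items[j - 1], items[j] = items[j], items[j - 1]
                swapped = True
        for j in range(i):  # left-to-right pass
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            break
    return items


assert shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]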
| 29 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCAmelCase = logging.getLogger(__name__)
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , ) -> Dict:
_a : str = bnb_quantization_config.load_in_abit
_a : Dict = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
_a : str = []
# custom device map
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(device_map.keys() ) > 1:
_a : List[Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_a : Optional[int] = get_keys_to_not_convert(lowerCAmelCase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowerCAmelCase_ )
_a : Any = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_a : List[str] = []
_a : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowerCAmelCase_ )
# compatibility with peft
_a : Optional[Any] = load_in_abit
_a : Dict = load_in_abit
_a : Tuple = get_parameter_device(lowerCAmelCase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_a : int = replace_with_bnb_layers(lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
# convert param to the right dtype
_a : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_a : Any = name.replace('.weight' , '' ).replace('.bias' , '' )
_a : str = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCAmelCase_ ):
param.to(lowerCAmelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
_a : str = replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
_a : List[Any] = get_quantized_model_device_map(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , max_memory=lowerCAmelCase_ , no_split_module_classes=lowerCAmelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_a : Any = True
_a : Union[str, Any] = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase_ , offload_state_dict=lowerCAmelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowerCAmelCase_ , device_map=lowerCAmelCase_ , offload_dir=lowerCAmelCase_ )
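
# Hedged usage sketch for the quantization entry point above (accelerate's
# load_and_quantize_model): build the model under init_empty_weights, then
# quantize from a checkpoint folder. The model class and paths below are
# placeholders.
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(
#       empty_model,
#       bnb_quantization_config=bnb_config,
#       weights_location="./checkpoint_dir",  # placeholder
#       device_map="auto",
#   )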
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            } )
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == 'balanced_low_0'), max_memory=max_memory, **kwargs, )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu or the disk
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
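# Illustrative usage sketch for the helpers above (not part of the original file;
# the model class and checkpoint path are made-up placeholders, and it assumes the
# quantization config exposes `load_in_8bit`/`llm_int8_threshold` as sketched here).
# A CUDA device is required by `load_and_quantize_model`.
#
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# with init_empty_weights():
#     empty_model = MyModel()  # hypothetical model class
# quantized_model = load_and_quantize_model(
#     empty_model,
#     bnb_quantization_config=bnb_config,
#     weights_location="path/to/weights_folder",  # placeholder path
#     device_map="auto",
# )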
| 89 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
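# Worked example for `downscale_height_and_width`: with height=width=768 and the
# default scale_factor=8, 768 // 8**2 == 12 with no remainder, so the function
# returns (12 * 8, 12 * 8) == (96, 96) -- the latent resolution for a 768x768
# image. A non-multiple input such as height=500 rounds up: 500 // 64 == 7 with a
# remainder, so new_height becomes 8 and that axis maps to 64.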
def prepare_image(pil_image, w=512, h=512):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
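# Quick sanity sketch for `prepare_image` (illustrative only): the result is a
# (1, 3, h, w) float tensor rescaled from the [0, 255] pixel range to [-1, 1].
#
# pil = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
# tensor = prepare_image(pil, w=64, h=64)
# assert tensor.shape == (1, 3, 64, 64)
# assert tensor.min() >= -1.0 and tensor.max() <= 1.0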
class KandinskyV22Img2ImgPipeline(DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , unet , scheduler , movq , ) -> Union[str, Any]:
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ) -> Dict:
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
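    # Worked example for `get_timesteps`: with num_inference_steps=100 and
    # strength=0.3 (the default in `__call__` below), init_timestep = min(int(100 * 0.3), 100) = 30
    # and t_start = 70, so only the last 30 scheduler steps run -- lower strength
    # skips more of the denoising trajectory and stays closer to the init image.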
    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ) -> Tuple:
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}" )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator )}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ) -> Any:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ) -> Union[str, Any]:
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(__UpperCAmelCase )
    def __call__( self , image_embeds , image , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , strength = 0.3 , num_images_per_prompt = 1 , generator = None , output_type = "pil" , return_dict = True , ) -> str:
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        if not isinstance(image , list ):
            image = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
        image = torch.cat([prepare_image(i , width , height ) for i in image] , dim=0 )
        image = image.to(dtype=image_embeds.dtype , device=device )
        latents = self.movq.encode(image )['latents']
        latents = latents.repeat_interleave(num_images_per_prompt , dim=0 )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , device )
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        latents = self.prepare_latents(
            latents , latent_timestep , batch_size , num_images_per_prompt , image_embeds.dtype , device , generator )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
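# Minimal usage sketch for the lazy module defined above (the FNet symbols are the
# real exports; the surrounding `transformers` package layout is assumed):
# `_LazyModule` defers the heavy torch-backed imports until an attribute is first
# accessed, so importing the package stays cheap.
#
# from transformers import FNetConfig, FNetModel  # resolved lazily on attribute access
# config = FNetConfig(num_hidden_layers=2)
# model = FNetModel(config)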
| 90 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value
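# Example: with RUN_SLOW=yes in the environment, parse_flag_from_env("RUN_SLOW")
# returns a truthy value (strtobool maps "yes"/"true"/"1" to 1); when the key is
# unset, the `default` argument is returned unchanged.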
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def require_torch_min_version(test_case=None, version=None):
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , F"test requires torch version >= {version}" )(test_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
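# Usage sketch for the skip decorators above: they wrap a test function (or a
# whole TestCase) and skip it when the required backend or library is missing.
#
# @require_cuda
# def test_runs_on_gpu(self):
#     assert torch.cuda.is_available()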
class TempDirTestCase(unittest.TestCase ):
'''simple docstring'''
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ) -> Union[str, Any]:
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
    def tearDownClass( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
    def setUp( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class AccelerateTestCase(unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
'''simple docstring'''
    def add_mocks( self , mocks ) -> Any:
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
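# `are_the_same_tensors` gathers one copy of `tensor` per process and verifies
# every rank holds the same values -- useful e.g. after broadcasting weights. On a
# single process the gathered stack has shape (1, *tensor.shape), so the check is
# trivially true.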
class _RunOutput:
    '''simple docstring'''
    def __init__( self , returncode , stdout , stderr ) -> Any:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    '''simple docstring'''
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
class SubprocessCallException(Exception ):
    '''simple docstring'''
    pass
def run_command(command, return_stdout=False):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 0 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
UpperCAmelCase_ : Tuple = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
UpperCAmelCase_ : Union[str, Any] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc") -> Optional[Any]:
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span) -> List[str]:
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1} )
        logger.info(
            name.ljust(10 ) , f'Recall: {recall * 1_00:.2f}' , f' Precision: {precision * 1_00:.2f}' , f' F1: {f1 * 1_00:.2f}' , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_00
        logger.info(f'CoNLL score: {conll:.2f}' )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
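# Worked example for the CoNLL average computed above: if MUC, B-cubed and CEAFe
# reach F1 = 0.80, 0.70 and 0.60, then conll = (0.80 + 0.70 + 0.60) / 3 * 100 = 70.0,
# which is what `output_scores["conll_score"]` reports.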
def check_gold_parse_annotation(key_lines) -> List[str]:
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''')),
'''references''': datasets.Sequence(datasets.Value('''string''')),
}) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False):
        '''simple docstring'''
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 91 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    '''simple docstring'''
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
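# Sanity sketch for `dummy_dataloaders`: every batch is an (x, y) pair with
# y = a*x + b + noise, so with the defaults a=2, b=3 a trained DummyModel should
# recover parameters close to 2 and 3.
#
# train_dl, valid_dl = dummy_dataloaders()
# x, y = next(iter(train_dl))
# assert x.shape == (16, 1) and y.shape == (16, 1)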
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() ) # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
class CheckpointTest(unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 0 |
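The tests above exercise Accelerate's automatic checkpoint naming, rotation via total_limit, and optimizer map_location handling. A minimal standalone sketch of the same save/load round-trip, assuming a recent accelerate release (the toy model and tensor shapes are invented for illustration):

# Sketch: Accelerate save/load round-trip with automatic checkpoint naming.
import os
import tempfile

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

model = torch.nn.Linear(4, 2)  # toy model standing in for DummyModel
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

with tempfile.TemporaryDirectory() as tmpdir:
    config = ProjectConfiguration(project_dir=tmpdir, automatic_checkpoint_naming=True)
    accelerator = Accelerator(project_config=config)
    model, optimizer = accelerator.prepare(model, optimizer)

    accelerator.save_state()  # writes <tmpdir>/checkpoints/checkpoint_0
    before = {k: v.clone() for k, v in model.state_dict().items()}

    optimizer.zero_grad()
    model(torch.randn(8, 4)).sum().backward()  # one fake training step
    optimizer.step()

    accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
    assert all(torch.equal(before[k], v) for k, v in model.state_dict().items())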
from __future__ import annotations
from scipy.special import comb # type: ignore
class a__ :
def __init__( self , _A ):
"""simple docstring"""
__lowerCAmelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowerCAmelCase = len(_A ) - 1
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCAmelCase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_A ) , 5 ) == 1
return output_values
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowerCAmelCase = self.basis_function(_A )
__lowerCAmelCase = 0.0
__lowerCAmelCase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __SCREAMING_SNAKE_CASE( self , _A = 0.01 ):
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowerCAmelCase = [] # x coordinates of points to plot
__lowerCAmelCase = [] # y coordinates of points to plot
__lowerCAmelCase = 0.0
while t <= 1:
__lowerCAmelCase = self.bezier_curve_function(_A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowerCAmelCase = [i[0] for i in self.list_of_points]
__lowerCAmelCase = [i[1] for i in self.list_of_points]
plt.plot(
_A , _A , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(_A , _A , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 92 |
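Since a degree-1 Bezier curve is plain linear interpolation between its two control points, the Bernstein-basis evaluation above is easy to sanity-check by hand. A minimal standalone sketch (Python 3.8+ for math.comb; the helper name is ours):

# Sketch: evaluate a Bezier curve via the Bernstein basis and check degree 1.
from math import comb

def bezier_point(points, t):
    n = len(points) - 1  # degree of the curve
    x = sum(comb(n, i) * (1 - t) ** (n - i) * t**i * p[0] for i, p in enumerate(points))
    y = sum(comb(n, i) * (1 - t) ** (n - i) * t**i * p[1] for i, p in enumerate(points))
    return (x, y)

# Degree 1 at t=0.5 is the midpoint of the segment.
assert bezier_point([(1, 1), (3, 5)], 0.5) == (2.0, 3.0)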
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_lowercase : Dict = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_lowercase : Union[str, Any] = {
"ctrl": 2_5_6,
}
_lowercase : List[Any] = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : List[str] = set()
lowercase_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : List[Any] = char
lowercase_ : Union[str, Any] = set(__SCREAMING_SNAKE_CASE )
return pairs
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = CONTROL_CODES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<unk>" , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(unk_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle:
lowercase_ : int = json.load(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = {v: k for k, v in self.encoder.items()}
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle:
lowercase_ : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1]
lowercase_ : Tuple = [tuple(merge.split() ) for merge in merges]
lowercase_ : Union[str, Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowercase_ : str = {}
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.encoder )
def _snake_case ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase_ : Tuple = tuple(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase_ : Any = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
lowercase_ : Optional[Any] = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : str = bigram
lowercase_ : List[str] = []
lowercase_ : Tuple = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
lowercase_ : int = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ : int = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : List[Any] = tuple(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
lowercase_ : Tuple = get_pairs(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = '''@@ '''.join(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = word[:-4]
lowercase_ : Union[str, Any] = word
return word
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = []
lowercase_ : str = re.findall(R'''\S+\n?''' , __SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) ) )
return split_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = ''' '''.join(__SCREAMING_SNAKE_CASE ).replace('''@@ ''' , '''''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase_ : int = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' )
lowercase_ : Dict = 0
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowercase_ : str = token_index
writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 93 |
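The bpe method above repeatedly merges the lowest-ranked (earliest-learned) adjacent pair until no ranked pair remains. A toy, self-contained sketch of that merge loop, leaving out the real tokenizer's "</w>" end-of-word marker and "@@ " join; the rank table here is invented:

# Sketch: byte-pair merging with a hand-made rank table.
def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # invented merge ranks

def bpe(token):
    word = tuple(token)
    pairs = get_pairs(word)
    while pairs:
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)  # apply the learned merge
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
        pairs = get_pairs(word)
    return " ".join(word)

assert bpe("lower") == "low e r"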
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head:
return True
# split the list to two parts
UpperCAmelCase_ , UpperCAmelCase_ : Any = head.next, head
while fast and fast.next:
UpperCAmelCase_ : str = fast.next.next
UpperCAmelCase_ : Union[str, Any] = slow.next
UpperCAmelCase_ : int = slow.next
    UpperCAmelCase_ : List[Any] = None # Terminate the first half here (the comparison below still works without it).
# reverse the second part
UpperCAmelCase_ : Tuple = None
while second:
UpperCAmelCase_ : int = second.next
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Optional[Any] = second
UpperCAmelCase_ : Tuple = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ : Optional[Any] = node.next
UpperCAmelCase_ : Dict = head.next
return True
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ : Any = head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ : List[str] = [slow.val]
while slow.next:
UpperCAmelCase_ : List[str] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ : int = cur.next
return True
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head or not head.next:
return True
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : int = 0
while head:
if head.val in d:
d[head.val].append(__snake_case )
else:
UpperCAmelCase_ : List[Any] = [pos]
UpperCAmelCase_ : Any = head.next
pos += 1
UpperCAmelCase_ : Dict = pos - 1
UpperCAmelCase_ : Optional[int] = 0
for v in d.values():
if len(__snake_case ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ : int = 0
for i in range(0 , len(__snake_case ) ):
if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 29 | 0 |
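All three palindrome checks above assume a singly linked node type exposing val and next, which the snippet does not define. A minimal node class plus a quick test (names are illustrative):

# Sketch: a tiny singly linked node for exercising the checks above.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    head = ListNode(values[0])
    cur = head
    for v in values[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head

def is_palindrome(head):
    # Collect values and compare with the reversal (O(n) space, like the stack variant).
    vals, cur = [], head
    while cur:
        vals.append(cur.val)
        cur = cur.next
    return vals == vals[::-1]

assert is_palindrome(build([1, 2, 2, 1]))
assert not is_palindrome(build([1, 2, 3]))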
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
snake_case : Optional[Any] = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
snake_case : List[str] = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
snake_case : Optional[Any] = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
snake_case : List[str] = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
snake_case : int = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
snake_case : Any = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
def __lowerCamelCase ( UpperCAmelCase_ : str ):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('''boolean value expected''' )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]=False ):
"""simple docstring"""
a :Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
a :str = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
a :str = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
a :List[str] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
a :str = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
a :str = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
a :Optional[int] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
a :Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
a :Optional[Any] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
a :List[Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
if has_skip:
a :Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
a :str = checkpoint[F'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def __lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=None ):
"""simple docstring"""
a , a , a :List[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
a , a , a :Any = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
a :Union[str, Any] = checkpoint[F'''{old_prefix}.norm.weight''']
a :Union[str, Any] = checkpoint[F'''{old_prefix}.norm.bias''']
a :int = weight_q.squeeze(-1 ).squeeze(-1 )
a :Any = bias_q.squeeze(-1 ).squeeze(-1 )
a :Union[str, Any] = weight_k.squeeze(-1 ).squeeze(-1 )
a :str = bias_k.squeeze(-1 ).squeeze(-1 )
a :List[str] = weight_v.squeeze(-1 ).squeeze(-1 )
a :List[str] = bias_v.squeeze(-1 ).squeeze(-1 )
a :Dict = (
checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
a :int = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
a :Any = torch.load(UpperCAmelCase_ , map_location='''cpu''' )
a :Optional[int] = {}
a :Optional[int] = checkpoint['''time_embed.0.weight''']
a :Optional[int] = checkpoint['''time_embed.0.bias''']
a :Any = checkpoint['''time_embed.2.weight''']
a :List[Any] = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
a :Optional[Any] = checkpoint['''label_emb.weight''']
a :Optional[int] = checkpoint['''input_blocks.0.0.weight''']
a :List[Any] = checkpoint['''input_blocks.0.0.bias''']
a :List[str] = unet_config['''down_block_types''']
a :Optional[int] = unet_config['''layers_per_block''']
a :int = unet_config['''attention_head_dim''']
a :Optional[int] = unet_config['''block_out_channels''']
a :Union[str, Any] = 1
a :Optional[Any] = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
a :str = channels_list[i]
a :int = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
a :Dict = F'''down_blocks.{i}.resnets.{j}'''
a :Optional[int] = F'''input_blocks.{current_layer}.0'''
a :Dict = True if j == 0 and downsample_block_has_skip else False
a :Dict = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
a :Any = F'''down_blocks.{i}.resnets.{j}'''
a :Dict = F'''input_blocks.{current_layer}.0'''
a :Optional[Any] = True if j == 0 and downsample_block_has_skip else False
a :Union[str, Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
a :Tuple = F'''down_blocks.{i}.attentions.{j}'''
a :Union[str, Any] = F'''input_blocks.{current_layer}.1'''
a :Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
a :int = F'''down_blocks.{i}.downsamplers.0'''
a :List[str] = F'''input_blocks.{current_layer}.0'''
a :List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
a :Union[str, Any] = current_channels
# hardcoded the mid-block for now
a :List[str] = '''mid_block.resnets.0'''
a :Any = '''middle_block.0'''
a :Union[str, Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a :int = '''mid_block.attentions.0'''
a :Any = '''middle_block.1'''
a :Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a :int = '''mid_block.resnets.1'''
a :Union[str, Any] = '''middle_block.2'''
a :Dict = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a :int = 0
a :Any = unet_config['''up_block_types''']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
a :Any = F'''up_blocks.{i}.resnets.{j}'''
a :str = F'''output_blocks.{current_layer}.0'''
a :Dict = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
a :str = F'''up_blocks.{i}.upsamplers.0'''
a :Any = F'''output_blocks.{current_layer-1}.1'''
a :List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
a :Tuple = F'''up_blocks.{i}.resnets.{j}'''
a :Tuple = F'''output_blocks.{current_layer}.0'''
a :List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
a :List[str] = F'''up_blocks.{i}.attentions.{j}'''
a :Dict = F'''output_blocks.{current_layer}.1'''
a :List[str] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
a :Optional[int] = F'''up_blocks.{i}.upsamplers.0'''
a :Optional[Any] = F'''output_blocks.{current_layer-1}.2'''
a :Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a :Optional[Any] = checkpoint['''out.0.weight''']
a :List[Any] = checkpoint['''out.0.bias''']
a :Tuple = checkpoint['''out.2.weight''']
a :List[str] = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
snake_case : Union[str, Any] = parser.parse_args()
snake_case : int = strabool(args.class_cond)
snake_case : Optional[Any] = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
snake_case : Dict = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
snake_case : Any = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
snake_case : Optional[Any] = None
snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
snake_case : Tuple = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
snake_case : Union[str, Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
snake_case : str = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
snake_case : Optional[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
snake_case : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config)
snake_case : Any = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 94 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
def _A ( SCREAMING_SNAKE_CASE : int = 1_000_000 ):
"""simple docstring"""
    a__ : Optional[int] = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 95 |
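The solution above is a sieve over Euler's totient: phi starts at i - 1, and each prime i removes the factor 1/i from its multiples, so the sum counts reduced proper fractions with denominator up to the limit (Project Euler problem 72). A brief cross-check against a brute-force gcd count, with a small limit so it runs quickly:

# Sketch: verify the totient sieve against a direct gcd-based count.
from math import gcd

def phi_bruteforce(n):
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

def totient_sum(limit):
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])

assert totient_sum(100) == sum(phi_bruteforce(n) for n in range(2, 101))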
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_tf_ops.py
lowercase__ = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowercase__ = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Dict = SavedModel()
_lowerCamelCase : Optional[int] = []
with open(os.path.join(lowercase__ , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
_lowerCamelCase : Any = json.load(lowercase__ )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
_lowerCamelCase : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_lowerCamelCase : Union[str, Any] = sorted(lowercase__ )
_lowerCamelCase : Optional[int] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops) )
elif len(lowercase__ ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*lowercase__ , sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
lowercase__ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 96 |
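The core of the script above is collecting every op name from the SavedModel's graph definition and its function library before diffing against the ONNX opset list. A condensed sketch of that traversal (the .pb path is a placeholder):

# Sketch: enumerate op names in a SavedModel protobuf.
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

saved_model = SavedModel()
with open("/path/to/saved_model.pb", "rb") as f:  # placeholder path
    saved_model.ParseFromString(f.read())

op_names = set()
for meta_graph in saved_model.meta_graphs:
    op_names.update(node.op for node in meta_graph.graph_def.node)
    for func in meta_graph.graph_def.library.function:
        op_names.update(node.op for node in func.node_def)
print(sorted(op_names))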
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The overlap between neighbouring tiles avoids visible seams in the output.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
| 29 | 0 |
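The blend_v and blend_h helpers above cross-fade two tiles linearly across their overlap, which is why the tiled encode/decode paths show no visible seams. The same ramp in one dimension, as a small numpy sketch (numpy assumed available):

# Sketch: 1-D version of the linear tile cross-fade used by blend_v/blend_h.
import numpy as np

def blend(a, b, blend_extent):
    extent = min(len(a), len(b), blend_extent)
    out = b.copy()
    for y in range(extent):
        w = y / extent  # 0 at the start of the seam, approaching 1 at its end
        out[y] = a[len(a) - extent + y] * (1 - w) + b[y] * w
    return out

blended = blend(np.zeros(8), np.ones(8), 4)
assert np.allclose(blended[:4], [0.0, 0.25, 0.5, 0.75])  # smooth ramp from a into b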
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__snake_case = '''\
'''
__snake_case = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
__snake_case = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = True , UpperCamelCase_=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCamelCase__ :Tuple = '''cuda'''
else:
UpperCamelCase__ :Dict = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCamelCase__ :Tuple = AutoModelForCausalLM.from_pretrained(UpperCamelCase_ )
UpperCamelCase__ :List[str] = model.to(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCamelCase__ :Any = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(UpperCamelCase_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCamelCase__ :str = model.config.max_length - 1
else:
UpperCamelCase__ :List[str] = model.config.max_length
UpperCamelCase__ :List[str] = tokenizer(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''pt''' , return_attention_mask=UpperCamelCase_ , ).to(UpperCamelCase_ )
UpperCamelCase__ :Tuple = encodings['''input_ids''']
UpperCamelCase__ :str = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCamelCase__ :str = []
UpperCamelCase__ :Any = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ) ):
UpperCamelCase__ :int = min(start_index + batch_size , len(UpperCamelCase_ ) )
UpperCamelCase__ :Tuple = encoded_texts[start_index:end_index]
UpperCamelCase__ :Any = attn_masks[start_index:end_index]
if add_start_token:
UpperCamelCase__ :List[str] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCamelCase__ :Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCamelCase_ ), attn_mask] , dim=1 )
UpperCamelCase__ :Optional[Any] = encoded_batch
with torch.no_grad():
UpperCamelCase__ :Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ).logits
UpperCamelCase__ :Optional[int] = out_logits[..., :-1, :].contiguous()
UpperCamelCase__ :List[Any] = labels[..., 1:].contiguous()
UpperCamelCase__ :int = attn_mask[..., 1:].contiguous()
UpperCamelCase__ :List[str] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , UpperCamelCase_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase_ )} | 97 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
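The string-building above is equivalent to Python's built-in bitwise AND. A shorter variant, noting that bin() drops the leading zeros the original zero-pads in:

# Sketch: the same binary AND via the & operator.
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    return bin(a & b)

assert binary_and(25, 32) == "0b0"
assert binary_and(37, 50) == "0b100000"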
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowerCAmelCase__ : str = logging.get_logger(__name__)
@dataclass
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : Union[str, Any] ,**lowerCamelCase__ : Optional[Any] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCAmelCase__ = deprecated_arg[3:]
setattr(self ,lowerCamelCase__ ,not kwargs.pop(lowerCamelCase__ ) )
logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCAmelCase__ = kwargs.pop('torchscript' ,self.torchscript )
UpperCAmelCase__ = kwargs.pop('torch_xla_tpu_print_metrics' ,self.torch_xla_tpu_print_metrics )
UpperCAmelCase__ = kwargs.pop('fp16_opt_level' ,self.fpaa_opt_level )
super().__init__(**lowerCamelCase__ )
snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Trace the models using torchscript"} )
snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
snake_case__ = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def __lowerCAmelCase ( self : Optional[int] ):
requires_backends(self ,['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
UpperCAmelCase__ = torch.device('cpu' )
UpperCAmelCase__ = 0
elif is_torch_tpu_available():
UpperCAmelCase__ = xm.xla_device()
UpperCAmelCase__ = 0
else:
UpperCAmelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
UpperCAmelCase__ = torch.cuda.device_count()
return device, n_gpu
@property
def __lowerCAmelCase ( self : Tuple ):
return is_torch_tpu_available() and self.tpu
@property
def __lowerCAmelCase ( self : Any ):
requires_backends(self ,['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __lowerCAmelCase ( self : Dict ):
requires_backends(self ,['torch'] )
return self._setup_devices[0]
@property
def __lowerCAmelCase ( self : int ):
requires_backends(self ,['torch'] )
return self._setup_devices[1]
@property
def __lowerCAmelCase ( self : List[str] ):
return self.n_gpu > 0
| 98 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
| 29 | 0 |
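For reference, the fast tokenizer above builds BERT-style inputs: [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair, with token-type ids 0 for the first segment and 1 for the second. A dependency-free sketch of that layout (the ids are invented):

# Sketch: special-token layout produced by build_inputs_with_special_tokens.
def build_inputs(cls_id, sep_id, ids_a, ids_b=None):
    output = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        output += ids_b + [sep_id]
    return output

assert build_inputs(101, 102, [7, 8]) == [101, 7, 8, 102]
assert build_inputs(101, 102, [7, 8], [9]) == [101, 7, 8, 102, 9, 102]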
import logging
import os
from .state import PartialState
class A__ ( logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def __lowercase ( lowercase) -> Any:
'''simple docstring'''
a__ : int = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowercase ( self , lowercase , lowercase , *lowercase , **lowercase) -> str:
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
a__ : Any = kwargs.pop('main_process_only' , lowercase)
a__ : Dict = kwargs.pop('in_order' , lowercase)
if self.isEnabledFor(lowercase):
if self._should_log(lowercase):
a__ , a__ : Dict = self.process(lowercase , lowercase)
self.logger.log(lowercase , lowercase , *lowercase , **lowercase)
elif in_order:
a__ : Dict = PartialState()
for i in range(state.num_processes):
if i == state.process_index:
a__ , a__ : List[str] = self.process(lowercase , lowercase)
self.logger.log(lowercase , lowercase , *lowercase , **lowercase)
state.wait_for_everyone()
def A_ ( A__ , A__ = None ) -> Tuple:
if log_level is None:
a__ : Dict = os.environ.get('ACCELERATE_LOG_LEVEL' , A__ )
a__ : str = logging.getLogger(A__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(A__ , {} )
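# Usage sketch for the multi-process logger above (accelerate's logging helper).
# PartialState must be initialized before any logging call, so build an
# Accelerator (or PartialState) first.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()                      # initializes PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed by every rank, in order", main_process_only=False, in_order=True)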
| 99 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_metaad_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
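# Minimal sketch: the configuration above corresponds to transformers'
# EfficientFormerConfig; instantiating it with the defaults shown and building
# a randomly initialized backbone from it.
from transformers import EfficientFormerConfig, EfficientFormerModel

config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
model = EfficientFormerModel(config)
print(model.config.num_hidden_layers)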
| 29 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger()
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__lowercase : nn.Module
__lowercase : List[nn.Module] = field(default_factory=__a )
__lowercase : list = field(default_factory=__a )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = len(list(m.modules())) == 1 or isinstance(lowerCAmelCase__ , nn.Convad) or isinstance(lowerCAmelCase__ , nn.BatchNormad)
if has_not_submodules:
self.traced.append(lowerCAmelCase__)
def __call__( self , lowerCAmelCase__):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(lowerCAmelCase__)
[x.remove() for x in self.handles]
return self
@property
def snake_case_ ( self):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda lowerCAmelCase__: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__lowercase : nn.Module
__lowercase : nn.Module
__lowercase : int = 0
__lowercase : List = field(default_factory=__a )
__lowercase : List = field(default_factory=__a )
def __call__( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = Tracker(self.dest)(lowerCAmelCase__).parametrized
__SCREAMING_SNAKE_CASE = Tracker(self.src)(lowerCAmelCase__).parametrized
__SCREAMING_SNAKE_CASE = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.src_skip , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.dest_skip , lowerCAmelCase__))
if len(lowerCAmelCase__) != len(lowerCAmelCase__):
raise Exception(
f"Numbers of operations are different. Source module has {len(lowerCAmelCase__)} operations while"
f" destination module has {len(lowerCAmelCase__)}.")
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}")
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = True ):
print(f"Converting {name}..." )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = timm.create_model(UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval()
__SCREAMING_SNAKE_CASE = ResNetForImageClassification(UpperCamelCase_ ).eval()
__SCREAMING_SNAKE_CASE = ModuleTransfer(src=UpperCamelCase_ , dest=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = torch.randn((1, 3, 224, 224) )
module_transfer(UpperCamelCase_ )
assert torch.allclose(from_model(UpperCamelCase_ ) , our_model(UpperCamelCase_ ).logits ), "The model logits don't match the original one."
__SCREAMING_SNAKE_CASE = f"resnet{'-'.join(name.split('resnet' ) )}"
print(UpperCamelCase_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=UpperCamelCase_ , )
# we can use the convnext one
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=UpperCamelCase_ , )
print(f"Pushed {checkpoint_name}" )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = True ):
__SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE = 1000
__SCREAMING_SNAKE_CASE = (1, num_labels)
__SCREAMING_SNAKE_CASE = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = partial(UpperCamelCase_ , num_labels=UpperCamelCase_ , idalabel=UpperCamelCase_ , labelaid=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(UpperCamelCase_ , names_to_config[model_name] , UpperCamelCase_ , UpperCamelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, expected_shape
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__magic_name__ = parser.parse_args()
__magic_name__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
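# Hedged sketch of the hook-based tracing the first dataclass above (Tracker in
# the original script) performs: register a forward hook on every submodule and
# record the leaf modules that actually run, so two architectures can later be
# aligned operation by operation for weight transfer.
import torch
import torch.nn as nn

def trace_leaf_modules(model: nn.Module, x: torch.Tensor) -> list:
    traced, handles = [], []

    def hook(module, args, output):
        if len(list(module.children())) == 0:  # leaf: no submodules of its own
            traced.append(module)

    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)           # a single forward pass fires the hooks
    for h in handles:
        h.remove()
    return traced

demo = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
print(trace_leaf_modules(demo, torch.randn(1, 3, 8, 8)))  # [Conv2d, BatchNorm2d, ReLU]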
| 100 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
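# Classifier-free guidance as applied in the denoising loop above, shown in
# isolation (a minimal sketch): the batch stacks [unconditional, conditional]
# halves, and the guided prediction extrapolates away from the unconditional one.
import torch

guidance_scale = 4.0
noise_pred = torch.randn(2, 16, 32)                  # stacked uncond/cond predictions
uncond, cond = noise_pred.chunk(2)
guided = uncond + guidance_scale * (cond - uncond)   # shape (1, 16, 32)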
| 29 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase__ :Dict = logging.getLogger(__name__)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , ):
'''simple docstring'''
lowercase = bnb_quantization_config.load_in_abit
lowercase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
lowercase = []
# custom device map
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(device_map.keys() ) > 1:
lowercase = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowercase = get_keys_to_not_convert(lowerCAmelCase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowerCAmelCase__ )
lowercase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowercase = []
lowercase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowerCAmelCase__ )
# compatibility with peft
lowercase = load_in_abit
lowercase = load_in_abit
lowercase = get_parameter_device(lowerCAmelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowercase = replace_with_bnb_layers(lowerCAmelCase__ , lowerCAmelCase__ , modules_to_not_convert=lowerCAmelCase__ )
# convert param to the right dtype
lowercase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowercase = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowercase = getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCAmelCase__ ):
param.to(lowerCAmelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
lowercase = replace_with_bnb_layers(
lowerCAmelCase__ , lowerCAmelCase__ , modules_to_not_convert=lowerCAmelCase__ )
lowercase = get_quantized_model_device_map(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , max_memory=lowerCAmelCase__ , no_split_module_classes=lowerCAmelCase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowercase = True
lowercase = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase__ , offload_state_dict=lowerCAmelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowerCAmelCase__ , device_map=lowerCAmelCase__ , offload_dir=lowerCAmelCase__ )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
lowercase = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowercase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowercase = {}
lowercase = special_dtypes
lowercase = no_split_module_classes
lowercase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowercase = get_balanced_memory(
lowerCAmelCase__ , low_zero=(device_map == '''balanced_low_0''') , max_memory=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase = max_memory
lowercase = infer_auto_device_map(lowerCAmelCase__ , **lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
# check if don't have any quantized module on the cpu
lowercase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowercase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
'''simple docstring'''
if modules_to_not_convert is None:
lowercase = []
lowercase , lowercase = _replace_with_bnb_layers(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , ):
'''simple docstring'''
lowercase = False
for name, module in model.named_children():
if current_key_name is None:
lowercase = []
current_key_name.append(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowercase = '''.'''.join(lowerCAmelCase__ )
lowercase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowercase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowercase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCAmelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowercase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowercase = module.weight.data
if module.bias is not None:
lowercase = module.bias.data
bnb_module.requires_grad_(lowerCAmelCase__ )
setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = True
if len(list(module.children() ) ) > 0:
lowercase , lowercase = _replace_with_bnb_layers(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
# Create a copy of the model
with init_empty_weights():
lowercase = deepcopy(lowerCAmelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowercase = find_tied_parameters(lowerCAmelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase = sum(lowerCAmelCase__ , [] )
lowercase = len(lowerCAmelCase__ ) > 0
# Check if it is a base model
lowercase = False
if hasattr(lowerCAmelCase__ , '''base_model_prefix''' ):
lowercase = not hasattr(lowerCAmelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase = list(model.named_children() )
lowercase = [list_modules[-1][0]]
# add last module together with tied weights
lowercase = set(lowerCAmelCase__ ) - set(lowerCAmelCase__ )
lowercase = list(set(lowerCAmelCase__ ) ) + list(lowerCAmelCase__ )
# remove ".weight" from the keys
lowercase = ['''.weight''', '''.bias''']
lowercase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase = name.replace(lowerCAmelCase__ , '''''' )
filtered_module_names.append(lowerCAmelCase__ )
return filtered_module_names
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
for m in model.modules():
if isinstance(lowerCAmelCase__ , bnb.nn.Linearabit ):
return True
return False
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
return next(parameter.parameters() ).device
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowerCAmelCase__ , lowerCAmelCase__ , 0 , dtype=lowerCAmelCase__ , value=lowerCAmelCase__ )
lowercase = param_name
lowercase = model
if "." in tensor_name:
lowercase = tensor_name.split('''.''' )
for split in splits[:-1]:
lowercase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
lowercase = new_module
lowercase = splits[-1]
# offload weights
lowercase = False
offload_weight(module._parameters[tensor_name] , lowerCAmelCase__ , lowerCAmelCase__ , index=lowerCAmelCase__ )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , lowerCAmelCase__ , index=lowerCAmelCase__ , )
else:
offload_weight(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , index=lowerCAmelCase__ )
offload_weight(lowerCAmelCase__ , param_name.replace('''weight''' , '''SCB''' ) , lowerCAmelCase__ , index=lowerCAmelCase__ )
set_module_tensor_to_device(lowerCAmelCase__ , lowerCAmelCase__ , '''meta''' , dtype=lowerCAmelCase__ , value=torch.empty(*param.size() ) )
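# Usage sketch for the quantization entry point above, via accelerate's public
# API; the checkpoint folder path is hypothetical.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

with init_empty_weights():  # build the skeleton on the meta device, no weights allocated
    empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/gpt2_checkpoint",  # hypothetical local folder of weights
    device_map="auto",
)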
| 101 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
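# The seeding pattern from get_dummy_inputs above, in isolation: MPS does not
# support device-local torch.Generator objects, so the tests fall back to the
# global CPU generator there (a minimal sketch).
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
if str(device).startswith("mps"):
    generator = torch.manual_seed(0)
else:
    generator = torch.Generator(device=device).manual_seed(0)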
| 29 | 0 |
"""simple docstring"""
def lowercase ( _snake_case : list[list[int]] , _snake_case : int , _snake_case : int , _snake_case : set ) ->int:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = len(_snake_case ), len(grid[0] )
if (
min(_snake_case , _snake_case ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__snake_case : Tuple = 0
count += depth_first_search(_snake_case , row + 1 , _snake_case , _snake_case )
count += depth_first_search(_snake_case , row - 1 , _snake_case , _snake_case )
count += depth_first_search(_snake_case , _snake_case , col + 1 , _snake_case )
count += depth_first_search(_snake_case , _snake_case , col - 1 , _snake_case )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
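    # Worked example (hedged): assuming the function above keeps its original
    # name `depth_first_search`, count the simple paths from the top-left to
    # the bottom-right of a grid where 1 marks an obstacle.
    grid = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    print(depth_first_search(grid, 0, 0, set()))  # 2: the two paths around the obstacle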
| 102 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
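# What the lazy import structure above buys (a sketch): importing the package
# stays cheap, and the heavy torch-backed submodule loads only on first access.
from transformers import TimeSeriesTransformerConfig  # resolved lazily via _LazyModule

config = TimeSeriesTransformerConfig(prediction_length=24)
print(config.model_type)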
| 29 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
A__ : Any = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Any , *A_ : Dict , **A_ : Optional[Any]):
super().__init__(*A_ , **A_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def UpperCAmelCase__ ( self : int , A_ : Any=None , A_ : Dict=None , A_ : List[Any]=None):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : Dict = {}
if prompt is not None:
lowerCAmelCase_ : Any = prompt
if generate_kwargs is not None:
lowerCAmelCase_ : Union[str, Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCAmelCase_ : Any = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''')
lowerCAmelCase_ : int = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : str , A_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **A_ : Union[str, Any]):
return super().__call__(A_ , **A_)
def UpperCAmelCase__ ( self : Any , A_ : Any , A_ : str=None):
lowerCAmelCase_ : Tuple = load_image(A_)
if prompt is not None:
if not isinstance(A_ , A_):
raise ValueError(
F"""Received an invalid text input, got - {type(A_)} - but expected a single string. """
'''Note also that one single text can be provided for conditional image to text generation.''')
lowerCAmelCase_ : Tuple = self.model.config.model_type
if model_type == "git":
lowerCAmelCase_ : Tuple = self.image_processor(images=A_ , return_tensors=self.framework)
lowerCAmelCase_ : List[str] = self.tokenizer(text=A_ , add_special_tokens=A_).input_ids
lowerCAmelCase_ : int = [self.tokenizer.cls_token_id] + input_ids
lowerCAmelCase_ : Union[str, Any] = torch.tensor(A_).unsqueeze(0)
model_inputs.update({'''input_ids''': input_ids})
elif model_type == "pix2struct":
lowerCAmelCase_ : Dict = self.image_processor(images=A_ , header_text=A_ , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCAmelCase_ : Any = self.image_processor(images=A_ , return_tensors=self.framework)
lowerCAmelCase_ : Optional[Any] = self.tokenizer(A_ , return_tensors=self.framework)
model_inputs.update(A_)
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""")
else:
lowerCAmelCase_ : str = self.image_processor(images=A_ , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
lowerCAmelCase_ : List[Any] = None
return model_inputs
def UpperCAmelCase__ ( self : Any , A_ : Union[str, Any] , A_ : Optional[int]=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , A_)
and all(x is None for x in model_inputs['''input_ids'''])
):
lowerCAmelCase_ : Optional[Any] = None
if generate_kwargs is None:
lowerCAmelCase_ : List[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCAmelCase_ : Any = model_inputs.pop(self.model.main_input_name)
lowerCAmelCase_ : Tuple = self.model.generate(A_ , **A_ , **A_)
return model_outputs
def UpperCAmelCase__ ( self : int , A_ : int):
lowerCAmelCase_ : Tuple = []
for output_ids in model_outputs:
lowerCAmelCase_ : Optional[int] = {
'''generated_text''': self.tokenizer.decode(
A_ , skip_special_tokens=A_ , )
}
records.append(A_)
return records
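# Usage sketch for the pipeline class above through the high-level factory; the
# image path is hypothetical, and any PIL image, local path, or URL works.
from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base")
outputs = captioner("path/to/image.png", max_new_tokens=20)  # hypothetical image path
print(outputs[0]["generated_text"])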
| 103 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
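# Usage sketch for the wrapper above, assuming a directory that already
# contains an exported model.onnx; both the directory and the keyword argument
# name are hypothetical and must match the graph's declared inputs.
import numpy as np
from diffusers import OnnxRuntimeModel

model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model_dir")
outputs = model(input=np.zeros((1, 3, 224, 224), dtype=np.float32))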
| 29 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _A ( *A__ , A__ = None , A__=True , A__=2 ):
"""simple docstring"""
from .. import __version__
__lowercase = take_from
__lowercase = ()
if not isinstance(args[0] , A__ ):
__lowercase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(A__ ).base_version ) >= version.parse(A__ ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
__lowercase = None
if isinstance(A__ , A__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(A__ ),)
__lowercase = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(A__ , A__ ):
values += (getattr(A__ , A__ ),)
__lowercase = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
__lowercase = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
__lowercase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , A__ , stacklevel=A__ )
if isinstance(A__ , A__ ) and len(A__ ) > 0:
__lowercase = inspect.getouterframes(inspect.currentframe() )[1]
__lowercase = call_frame.filename
__lowercase = call_frame.lineno
__lowercase = call_frame.function
__lowercase , __lowercase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(A__ ) == 0:
return
elif len(A__ ) == 1:
return values[0]
return values
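# Hedged usage sketch, assuming the helper above keeps its original diffusers
# name `deprecate`: it warns, pops the legacy kwarg from `take_from`, and
# returns its value so the caller can remap it. The removal version is made up.
def resize(image, size=None, **kwargs):
    legacy = deprecate("width", "9.9.9", "Use `size=` instead.", take_from=kwargs)
    if legacy is not None:
        size = legacy
    return image, size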
| 104 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
import bza
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lza.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
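# Sketch of how a test consumes these fixtures: pytest injects each fixture by
# its function name, so a test requesting e.g. `csv_path` (assuming the CSV
# fixture below keeps that original name) receives the generated temp file.
def test_csv_loads(csv_path):  # hypothetical test function
    ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
    assert ds.column_names == ["col_1", "col_2", "col_3"]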
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
import bza
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 29 | 0 |
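# A minimal, runnable sketch of the session-scoped fixture pattern used above:
# write a small CSV once per test session via tmp_path_factory. The DATA rows
# and the fixture name are illustrative assumptions, not taken from the dump.
import csv

import pytest

DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
]


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path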
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 105 |
from __future__ import annotations
def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
UpperCAmelCase_ : str = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase_ : Optional[Any] = []
for position in positions:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__snake_case )
return permissible_positions
def lowercase__ ( __snake_case : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
if is_complete(__snake_case ):
return True
for position in get_valid_pos(__snake_case , len(__snake_case ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = position
if board[y][x] == 0:
UpperCAmelCase_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
return True
UpperCAmelCase_ : List[Any] = 0
return False
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
for i in range(__snake_case ):
for j in range(__snake_case ):
UpperCAmelCase_ : Optional[Any] = 1
if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
return board
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : List[str] = F"Open Kight Tour cannot be performed on a board of size {n}"
raise ValueError(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
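# A cleaned-up sketch of the open knight's tour backtracking above, with
# readable names; board size 5 is just an example input.
def open_knight_tour(n: int) -> list[list[int]]:
    moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    board = [[0] * n for _ in range(n)]

    def solve(y: int, x: int, step: int) -> bool:
        board[y][x] = step
        if step == n * n:          # every cell visited exactly once
            return True
        for dy, dx in moves:
            ny, nx = y + dy, x + dx
            if 0 <= ny < n and 0 <= nx < n and board[ny][nx] == 0:
                if solve(ny, nx, step + 1):
                    return True
        board[y][x] = 0            # backtrack
        return False

    for i in range(n):
        for j in range(n):
            if solve(i, j, 1):
                return board
    raise ValueError(f"Open Knight Tour cannot be performed on a board of size {n}")


if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)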
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileBertTokenizer
lowercase__ = MobileBertTokenizerFast
lowercase__ = True
lowercase__ = True
lowercase__ = filter_non_english
lowercase__ = "google/mobilebert-uncased"
def __lowerCAmelCase ( self : Any ):
super().setUp()
lowerCAmelCase__ : Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCAmelCase__ : str = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __lowerCAmelCase ( self : str ,lowercase_ : Optional[int] ):
lowerCAmelCase__ : Optional[int] = '''UNwant\u00E9d,running'''
lowerCAmelCase__ : Tuple = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Optional[Any] = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(lowercase_ ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) ,[9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : List[Any] = self.get_tokenizer()
lowerCAmelCase__ : Dict = self.get_rust_tokenizer()
lowerCAmelCase__ : str = '''UNwant\u00E9d,running'''
lowerCAmelCase__ : Dict = tokenizer.tokenize(lowercase_ )
lowerCAmelCase__ : str = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
lowerCAmelCase__ : List[str] = tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : Any = rust_tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
lowerCAmelCase__ : str = tokenizer.encode(lowercase_ )
lowerCAmelCase__ : Optional[int] = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
# With lower casing
lowerCAmelCase__ : int = self.get_tokenizer(do_lower_case=lowercase_ )
lowerCAmelCase__ : Optional[Any] = self.get_rust_tokenizer(do_lower_case=lowercase_ )
lowerCAmelCase__ : Optional[Any] = '''UNwant\u00E9d,running'''
lowerCAmelCase__ : List[Any] = tokenizer.tokenize(lowercase_ )
lowerCAmelCase__ : Any = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Union[str, Any] = tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : Tuple = rust_tokenizer.encode(lowercase_ ,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
lowerCAmelCase__ : int = self.get_rust_tokenizer()
lowerCAmelCase__ : str = tokenizer.encode(lowercase_ )
lowerCAmelCase__ : List[str] = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : int = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : List[str] = BasicTokenizer(do_lower_case=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=lowercase_ ,strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''h\u00E9llo'''] )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : Any = BasicTokenizer(do_lower_case=lowercase_ ,strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=lowercase_ ,strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Optional[int] = BasicTokenizer(do_lower_case=lowercase_ ,strip_accents=lowercase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=lowercase_ ,never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCAmelCase__ : Tuple = {}
for i, token in enumerate(lowercase_ ):
lowerCAmelCase__ : Optional[Any] = i
lowerCAmelCase__ : List[Any] = WordpieceTokenizer(vocab=lowercase_ ,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) ,['''[UNK]''', '''runn''', '''##ing'''] )
def __lowerCAmelCase ( self : Dict ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __lowerCAmelCase ( self : List[Any] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __lowerCAmelCase ( self : Any ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase_ ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase_ ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
lowerCAmelCase__ : Union[str, Any] = tokenizer.encode('''sequence builders''' ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : Any = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase_ )
lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase_ ,lowercase_ )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def __lowerCAmelCase ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ ,**lowercase_ )
lowerCAmelCase__ : List[str] = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowerCAmelCase__ : Union[str, Any] = tokenizer_r.encode_plus(
lowercase_ ,return_attention_mask=lowercase_ ,return_token_type_ids=lowercase_ ,return_offsets_mapping=lowercase_ ,add_special_tokens=lowercase_ ,)
lowerCAmelCase__ : List[Any] = tokenizer_r.do_lower_case if hasattr(lowercase_ ,'''do_lower_case''' ) else False
lowerCAmelCase__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''] )
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Union[str, Any] = ['''的''', '''人''', '''有''']
lowerCAmelCase__ : Optional[Any] = ''''''.join(lowercase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(lowercase_ ,**lowercase_ )
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ ,**lowercase_ )
lowerCAmelCase__ : Dict = tokenizer_p.encode(lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : Tuple = tokenizer_r.encode(lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : Tuple = tokenizer_r.convert_ids_to_tokens(lowercase_ )
lowerCAmelCase__ : Tuple = tokenizer_p.convert_ids_to_tokens(lowercase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase_ ,lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_ ,**lowercase_ )
lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(lowercase_ ,**lowercase_ )
lowerCAmelCase__ : Any = tokenizer_r.encode(lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : List[str] = tokenizer_p.encode(lowercase_ ,add_special_tokens=lowercase_ )
lowerCAmelCase__ : Any = tokenizer_r.convert_ids_to_tokens(lowercase_ )
lowerCAmelCase__ : Dict = tokenizer_p.convert_ids_to_tokens(lowercase_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase__ : int = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(lowercase_ )
]
self.assertListEqual(lowercase_ ,lowercase_ )
self.assertListEqual(lowercase_ ,lowercase_ )
| 106 |
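# A minimal sketch of the greedy longest-match-first WordPiece algorithm
# exercised by the tokenizer tests above. The tiny vocabulary below is an
# illustrative assumption matching the test fixtures.
def wordpiece_tokenize(text: str, vocab: set[str], unk: str = "[UNK]") -> list[str]:
    tokens = []
    for word in text.split():
        start, pieces = 0, []
        while start < len(word):
            end = len(word)
            match = None
            while start < end:
                piece = word[start:end]
                if start > 0:
                    piece = "##" + piece   # continuation pieces carry the "##" prefix
                if piece in vocab:
                    match = piece
                    break
                end -= 1
            if match is None:              # no sub-piece matched: whole word is unknown
                pieces = [unk]
                break
            pieces.append(match)
            start = end
        tokens.extend(pieces)
    return tokens


VOCAB = {"un", "##want", "##ed", "runn", "##ing", "wa"}
print(wordpiece_tokenize("unwanted running", VOCAB))   # ['un', '##want', '##ed', 'runn', '##ing']
print(wordpiece_tokenize("unwantedX running", VOCAB))  # ['[UNK]', 'runn', '##ing']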
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase_ : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , __snake_case ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 0 |
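# A readable sketch of the same count the memoized version above returns
# (Project Euler 76 style): the number of ways to write m as a sum of at
# least two positive integers, i.e. partitions of m into parts smaller than m.
def count_partitions(m: int) -> int:
    ways = [1] + [0] * m          # ways[n] = partitions of n into parts 1..m-1
    for part in range(1, m):
        for n in range(part, m + 1):
            ways[n] += ways[n - part]
    return ways[m]


print(count_partitions(5))    # 6
print(count_partitions(100))  # 190569291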
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __magic_name__ ( A : NDArray[floataa], A : NDArray[floataa], A : list[int], A : int, ):
'''simple docstring'''
a , a = coefficient_matrix.shape
a , a = constant_matrix.shape
if rowsa != colsa:
a = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(A )
if colsa != 1:
a = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(A )
if rowsa != rowsa:
a = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
F"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(A )
if len(A ) != rowsa:
a = (
"Number of initial values must be equal to number of rows in coefficient "
F"""matrix but received {len(A )} and {rowsa}"""
)
raise ValueError(A )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
a = np.concatenate(
(coefficient_matrix, constant_matrix), axis=1 )
a , a = table.shape
strictly_diagonally_dominant(A )
# Iterates the whole matrix for given number of times
for _ in range(A ):
a = []
for row in range(A ):
a = 0
for col in range(A ):
if col == row:
a = table[row][col]
elif col == cols - 1:
a = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
a = (temp + val) / denom
new_val.append(A )
a = new_val
return [float(A ) for i in new_val]
def __magic_name__ ( A : NDArray[floataa] ):
'''simple docstring'''
a , a = table.shape
a = True
for i in range(0, A ):
a = 0
for j in range(0, cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
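# A compact NumPy sketch of the Jacobi iteration implemented above; the
# strictly diagonally dominant 3x3 system is an illustrative example.
import numpy as np


def jacobi(A: np.ndarray, b: np.ndarray, x0: np.ndarray, iterations: int) -> np.ndarray:
    D = np.diag(A)                 # diagonal entries a_ii
    R = A - np.diagflat(D)         # off-diagonal part
    x = x0.astype(float)
    for _ in range(iterations):
        x = (b - R @ x) / D        # x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii
    return x


A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])
print(jacobi(A, b, np.zeros(3), 50))  # converges toward the solution of Ax = b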
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 0 |
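# A minimal sketch of the postprocessing step the pipeline above performs
# (sigmoid over logits, then top-k answers). The logits and the label map
# are made-up stand-ins, not real model outputs.
import torch

logits = torch.tensor([[2.0, -1.0, 0.5, 3.0]])
id2label = {0: "cat", 1: "dog", 2: "bird", 3: "car"}

probs = logits.sigmoid()[0]
scores, ids = probs.topk(2)
print([{"score": s.item(), "answer": id2label[i.item()]} for s, i in zip(scores, ids)])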
"""simple docstring"""
lowerCAmelCase__ = range(2, 20 + 1)
lowerCAmelCase__ = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase : int = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase , lowerCAmelCase : List[Any] = 0, 0
lowerCAmelCase : int = n - i
lowerCAmelCase : Optional[Any] = memo.get(SCREAMING_SNAKE_CASE )
if sub_memo is not None:
lowerCAmelCase : Dict = sub_memo.get(SCREAMING_SNAKE_CASE )
if jumps is not None and len(SCREAMING_SNAKE_CASE ) > 0:
# find and make the largest jump without going over
lowerCAmelCase : int = -1
for _k in range(len(SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCAmelCase : str = _k
break
if max_jump >= 0:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCAmelCase : Optional[int] = diff + c
for j in range(min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ):
lowerCAmelCase , lowerCAmelCase : Any = divmod(SCREAMING_SNAKE_CASE , 1_0 )
if new_c > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Dict = []
else:
lowerCAmelCase : Union[str, Any] = {c: []}
lowerCAmelCase : List[Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCAmelCase , lowerCAmelCase : Any = next_term(SCREAMING_SNAKE_CASE , k - 1 , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCAmelCase , lowerCAmelCase : Dict = compute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
lowerCAmelCase : Any = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCAmelCase : Optional[Any] = 0
while j < len(SCREAMING_SNAKE_CASE ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(SCREAMING_SNAKE_CASE , (diff, dn, k) )
return (diff, dn)
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(SCREAMING_SNAKE_CASE ):
a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCAmelCase : int = i
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = 0, 0, 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCAmelCase : Dict = ds_c + ds_b
diff += addend
lowerCAmelCase : str = 0
for j in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = a_i[j] + addend
lowerCAmelCase , lowerCAmelCase : Any = divmod(SCREAMING_SNAKE_CASE , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return diff, i - start_i
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase : Optional[int] = digits[j] + addend
if s >= 1_0:
lowerCAmelCase , lowerCAmelCase : Dict = divmod(SCREAMING_SNAKE_CASE , 1_0 )
lowerCAmelCase : str = addend // 1_0 + quotient
else:
lowerCAmelCase : List[Any] = s
lowerCAmelCase : Dict = addend // 1_0
if addend == 0:
break
while addend > 0:
lowerCAmelCase , lowerCAmelCase : List[str] = divmod(SCREAMING_SNAKE_CASE , 1_0 )
digits.append(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0**1_5 ):
'''simple docstring'''
lowerCAmelCase : Any = [1]
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Optional[Any] = 0
while True:
lowerCAmelCase , lowerCAmelCase : Any = next_term(SCREAMING_SNAKE_CASE , 2_0 , i + dn , SCREAMING_SNAKE_CASE )
dn += terms_jumped
if dn == n - i:
break
lowerCAmelCase : Any = 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F"{solution() = }")
| 108 |
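# The code above accelerates the digit-sum sequence from Project Euler 551
# with cached "jumps"; this brute-force sketch shows the underlying
# recurrence a(n+1) = a(n) + digitsum(a(n)) for small n.
def digit_sum_sequence(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


print([digit_sum_sequence(i) for i in range(1, 10)])  # [1, 2, 4, 8, 16, 23, 28, 38, 49]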
import os
# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
UpperCAmelCase_ : Union[str, Any] = ''
with open(__snake_case ) as f:
UpperCAmelCase_ : List[Any] = f.readline()
UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
UpperCAmelCase_ : Optional[int] = [
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
if __name__ == "__main__":
print(solution())
| 29 | 0 |
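# A self-contained sketch of the triangular-words check above (Project
# Euler 42), with a tiny inline word list standing in for words.txt.
TRIANGULAR = {n * (n + 1) // 2 for n in range(1, 101)}


def word_value(word: str) -> int:
    return sum(ord(ch) - 64 for ch in word)  # 'A' -> 1, ..., 'Z' -> 26


words = ["SKY", "CAT", "A"]
print(sum(word_value(w) in TRIANGULAR for w in words))  # 2: SKY (55) and A (1)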
"""simple docstring"""
def _snake_case ( UpperCamelCase : int ):
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
UpperCAmelCase : Any = [True] * (num + 1)
UpperCAmelCase : Any = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , UpperCamelCase ):
UpperCAmelCase : int = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
A: int = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 109 |
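# A NumPy variant of the sieve above, included as a compact cross-check;
# both produce the primes up to the given bound.
import numpy as np


def sieve_np(limit: int) -> list[int]:
    is_prime = np.ones(limit + 1, dtype=bool)
    is_prime[:2] = False
    for p in range(2, int(limit**0.5) + 1):
        if is_prime[p]:
            is_prime[p * p :: p] = False   # strike out multiples starting at p*p
    return np.flatnonzero(is_prime).tolist()


print(sieve_np(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]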
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(SCREAMING_SNAKE_CASE , exponent // 2 , SCREAMING_SNAKE_CASE ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(SCREAMING_SNAKE_CASE , exponent - 1 , SCREAMING_SNAKE_CASE )) % modulo_value
def _a ( SCREAMING_SNAKE_CASE = 17_77 , SCREAMING_SNAKE_CASE = 18_55 , SCREAMING_SNAKE_CASE = 8 ):
"""simple docstring"""
lowercase__ = base
for _ in range(1 , SCREAMING_SNAKE_CASE ):
lowercase__ = _modexpt(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 110 |
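# The helper above reimplements modular exponentiation by squaring; Python's
# built-in three-argument pow() does the same thing, so the tetration loop
# (Project Euler 188) can be sketched as:
def tetration_last_digits(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)  # keep only the last `digits` digits
    return result


print(tetration_last_digits())  # last 8 digits of 1777 ↑↑ 1855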
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 0 |
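# A readable version of the cocktail shaker sort above: a bidirectional
# bubble sort with an early exit when a full pass makes no swap.
def cocktail_shaker_sort(items: list) -> list:
    for i in range(len(items) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):          # backward pass: sink small items left
            if items[j] < items[j - 1]:
                items[j], items[j - 1] = items[j - 1], items[j]
                swapped = True
        for j in range(i):                 # forward pass: float large items right
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            break
    return items


print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]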
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
class _snake_case ( _snake_case ):
lowerCAmelCase_ : Optional[int] = '''AutoTokenizer'''
lowerCAmelCase_ : List[str] = ['''tokenizer''']
lowerCAmelCase_ : List[str] = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self , a__ , a__=None ) -> int:
'''simple docstring'''
super().__init__(_UpperCamelCase )
snake_case_ = speaker_embeddings
@classmethod
def lowerCAmelCase__ ( cls , a__ , a__="speaker_embeddings_path.json" , **a__ ) -> Any:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
snake_case_ = get_file_from_repo(
_UpperCamelCase , _UpperCamelCase , subfolder=kwargs.pop("subfolder" , _UpperCamelCase ) , cache_dir=kwargs.pop("cache_dir" , _UpperCamelCase ) , force_download=kwargs.pop("force_download" , _UpperCamelCase ) , proxies=kwargs.pop("proxies" , _UpperCamelCase ) , resume_download=kwargs.pop("resume_download" , _UpperCamelCase ) , local_files_only=kwargs.pop("local_files_only" , _UpperCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , _UpperCamelCase ) , revision=kwargs.pop("revision" , _UpperCamelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
                    F'`{os.path.join(_UpperCamelCase , _UpperCamelCase )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
snake_case_ = None
else:
with open(_UpperCamelCase ) as speaker_embeddings_json:
snake_case_ = json.load(_UpperCamelCase )
else:
snake_case_ = None
snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
return cls(tokenizer=_UpperCamelCase , speaker_embeddings=_UpperCamelCase )
def lowerCAmelCase__ ( self , a__ , a__="speaker_embeddings_path.json" , a__="speaker_embeddings" , a__ = False , **a__ , ) -> Optional[int]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_UpperCamelCase , _UpperCamelCase , "v2" ) , exist_ok=_UpperCamelCase )
snake_case_ = {}
snake_case_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
snake_case_ = self._load_voice_preset(_UpperCamelCase )
snake_case_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , _UpperCamelCase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=_UpperCamelCase , )
snake_case_ = os.path.join(_UpperCamelCase , F'{prompt_key}_{key}.npy' )
snake_case_ = tmp_dict
with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , "w" ) as fp:
json.dump(_UpperCamelCase , _UpperCamelCase )
super().save_pretrained(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def lowerCAmelCase__ ( self , a__ = None , **a__ ) -> int:
'''simple docstring'''
snake_case_ = self.speaker_embeddings[voice_preset]
snake_case_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
snake_case_ = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _UpperCamelCase ) , cache_dir=kwargs.pop("cache_dir" , _UpperCamelCase ) , force_download=kwargs.pop("force_download" , _UpperCamelCase ) , proxies=kwargs.pop("proxies" , _UpperCamelCase ) , resume_download=kwargs.pop("resume_download" , _UpperCamelCase ) , local_files_only=kwargs.pop("local_files_only" , _UpperCamelCase ) , use_auth_token=kwargs.pop("use_auth_token" , _UpperCamelCase ) , revision=kwargs.pop("revision" , _UpperCamelCase ) , )
if path is None:
raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
snake_case_ = np.load(_UpperCamelCase )
return voice_preset_dict
def lowerCAmelCase__ ( self , a__ = None ) -> Optional[int]:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , a__=None , a__=None , a__="pt" , a__=256 , a__=False , a__=True , a__=False , **a__ , ) -> List[str]:
'''simple docstring'''
if voice_preset is not None and not isinstance(_UpperCamelCase , _UpperCamelCase ):
if (
isinstance(_UpperCamelCase , _UpperCamelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
snake_case_ = self._load_voice_preset(_UpperCamelCase )
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and not voice_preset.endswith(".npz" ):
snake_case_ = voice_preset + '.npz'
snake_case_ = np.load(_UpperCamelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_UpperCamelCase , **_UpperCamelCase )
snake_case_ = BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
snake_case_ = self.tokenizer(
_UpperCamelCase , return_tensors=_UpperCamelCase , padding="max_length" , max_length=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , add_special_tokens=_UpperCamelCase , **_UpperCamelCase , )
if voice_preset is not None:
snake_case_ = voice_preset
return encoded_text
| 85 |
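# A minimal sketch of the save/load round trip the processor above performs
# for voice presets (one .npy file per prompt array). The array shapes follow
# the processor's preset_shape map; the file names are illustrative assumptions.
import os
import tempfile

import numpy as np

preset = {
    "semantic_prompt": np.zeros(16),       # 1-D, per preset_shape
    "coarse_prompt": np.zeros((2, 16)),    # 2-D
    "fine_prompt": np.zeros((2, 16)),      # 2-D
}

with tempfile.TemporaryDirectory() as d:
    for key, arr in preset.items():
        np.save(os.path.join(d, f"speaker_{key}.npy"), arr)
    loaded = {k: np.load(os.path.join(d, f"speaker_{k}.npy")) for k in preset}
    print({k: v.shape for k, v in loaded.items()})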
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 0 |
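# The core classifier-free guidance step used in the pipeline above, isolated
# with dummy tensors: take the unconditional and conditional noise predictions
# and extrapolate away from the unconditional one.
import torch

guidance_scale = 4.0
noise_pred_uncond = torch.randn(1, 4, 8, 8)  # stand-in for the uncond branch
noise_pred_text = torch.randn(1, 4, 8, 8)    # stand-in for the cond branch

guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 8, 8])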
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class A__ ( _snake_case ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Dict ) -> float:
"""simple docstring"""
return 0.0
def __UpperCAmelCase ( a_: np.ndarray, a_: int ):
_UpperCAmelCase : int = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_UpperCAmelCase : Dict = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __UpperCAmelCase ( a_: FilterType, a_: int ):
_UpperCAmelCase : List[str] = 512
_UpperCAmelCase : str = [1] + [0] * (size - 1)
_UpperCAmelCase : Optional[Any] = [filter_type.process(__snake_case ) for item in inputs]
_UpperCAmelCase : Dict = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCAmelCase : Optional[int] = np.abs(np.fft.fft(__snake_case ) )
_UpperCAmelCase : List[str] = 20 * np.logaa(__snake_case )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24, samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
_UpperCAmelCase : Union[str, Any] = get_bounds(__snake_case, __snake_case )
plt.ylim(max([-80, bounds[0]] ), min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(__snake_case )
plt.show()
def __UpperCAmelCase ( a_: FilterType, a_: int ):
_UpperCAmelCase : int = 512
_UpperCAmelCase : Tuple = [1] + [0] * (size - 1)
_UpperCAmelCase : Tuple = [filter_type.process(__snake_case ) for item in inputs]
_UpperCAmelCase : List[str] = [0] * (samplerate - size) # zero-padding
outputs += filler
_UpperCAmelCase : Dict = np.angle(np.fft.fft(__snake_case ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24, samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi, 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(__snake_case, -2 * pi ) )
    plt.show()
| 145 |
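A self-contained sketch of the impulse-response measurement these plotting helpers perform, using a toy one-pole low-pass filter as the FilterType; the filter class and its coefficient are assumptions made for illustration, not part of this file.

import numpy as np

class OnePoleLowPass:
    """Toy filter satisfying the process() protocol: y[n] = y[n-1] + alpha * (x[n] - y[n-1])."""

    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.state = 0.0

    def process(self, sample: float) -> float:
        self.state += self.alpha * (sample - self.state)
        return self.state

size = 512
impulse = [1.0] + [0.0] * (size - 1)  # unit impulse, as in the helpers above
filt = OnePoleLowPass()
outputs = [filt.process(s) for s in impulse]
gains_db = 20 * np.log10(np.abs(np.fft.fft(outputs)) + 1e-12)
print(gains_db[0], gains_db[size // 2])  # ~0 dB at DC, strong attenuation near Nyquist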
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = AcceleratorState()
UpperCAmelCase_ : str = tensor[None].clone().to(state.device )
UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu()
UpperCAmelCase_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __snake_case ):
return False
return True
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : str = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : Optional[Any] = stderr
async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
while True:
UpperCAmelCase_ : Dict = await stream.readline()
if line:
callback(__snake_case )
else:
break
async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : str = []
def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ):
UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip()
sink.append(__snake_case )
if not quiet:
print(__snake_case , __snake_case , file=__snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ),
] , timeout=__snake_case , )
return _RunOutput(await p.wait() , __snake_case , __snake_case )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ : str = asyncio.get_event_loop()
UpperCAmelCase_ : int = loop.run_until_complete(
_stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) )
UpperCAmelCase_ : int = ' '.join(__snake_case )
if result.returncode > 0:
UpperCAmelCase_ : int = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__snake_case , 'decode' ):
UpperCAmelCase_ : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 0 |
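A usage sketch of the env-flag parsing pattern defined above: RUN_SLOW is read once at import time, and any strtobool-accepted value ("1", "yes", "true", ...) enables the slow tests. Note that distutils is deprecated as of Python 3.12, so newer code typically inlines the truthy-string check.

import os
from distutils.util import strtobool

os.environ["RUN_SLOW"] = "yes"          # illustrative; normally set by the CI environment
flag = os.environ.get("RUN_SLOW", "no")
run_slow = bool(strtobool(flag))
assert run_slow is True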
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """spiece.model"""}
__snake_case = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
__snake_case = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
__snake_case = """▁"""
class _lowerCAmelCase ( _snake_case ):
__UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="[CLS]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<unk>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<pad>" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
'''simple docstring'''
snake_case : Dict = (
AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase , normalized=_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase )
else mask_token
)
snake_case : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
snake_case : List[str] = do_lower_case
snake_case : Optional[Any] = remove_space
snake_case : Union[str, Any] = keep_accents
snake_case : Any = vocab_file
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[str] = self.__dict__.copy()
snake_case : Any = None
return state
def __setstate__( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : Dict = {}
snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if self.remove_space:
snake_case : Tuple = ' '.join(inputs.strip().split() )
else:
snake_case : List[Any] = inputs
snake_case : int = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
if not self.keep_accents:
snake_case : int = unicodedata.normalize("NFKD" , _UpperCamelCase )
snake_case : str = ''.join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] )
if self.do_lower_case:
snake_case : Optional[int] = outputs.lower()
return outputs
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.preprocess_text(_UpperCamelCase )
snake_case : int = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
snake_case : Any = []
for piece in pieces:
if len(_UpperCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
snake_case : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case : Union[str, Any] = cur_pieces[1:]
else:
snake_case : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCamelCase )
else:
new_pieces.append(_UpperCamelCase )
return new_pieces
def lowerCamelCase ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.sp_model.PieceToId(_UpperCamelCase )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.sp_model.IdToPiece(_UpperCamelCase )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = []
snake_case : List[Any] = ''
snake_case : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
snake_case : Tuple = True
snake_case : Optional[int] = []
else:
current_sub_tokens.append(_UpperCamelCase )
snake_case : Optional[Any] = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
snake_case : Tuple = [self.sep_token_id]
snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
snake_case : int = [self.sep_token_id]
snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case : int = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
| 203 |
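A standalone sketch of the text normalization performed by preprocess_text in the tokenizer above (whitespace collapsing, LaTeX-style quote replacement, accent stripping, lowercasing); it needs no SentencePiece model, and the helper name below is ours, not part of the tokenizer API.

import unicodedata

def preprocess(text: str, keep_accents: bool = False, do_lower_case: bool = True) -> str:
    text = " ".join(text.strip().split())              # collapse runs of whitespace
    text = text.replace("``", '"').replace("''", '"')  # normalize quote pairs
    if not keep_accents:
        text = unicodedata.normalize("NFKD", text)
        text = "".join(c for c in text if not unicodedata.combining(c))
    return text.lower() if do_lower_case else text

assert preprocess("  Héllo   ``world''  ") == 'hello "world"'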
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ):
'''simple docstring'''
def get_dataset(__snake_case : Optional[Any] ):
UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Any = get_dataset(__snake_case )
UpperCAmelCase_ : str = get_dataset(__snake_case )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = []
for epoch in range(__snake_case ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch
UpperCAmelCase_ : List[Any] = model(__snake_case )
UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case )
accelerator.backward(__snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 0 |
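A minimal sketch of the save/load round-trip these tests exercise, written against plain torch.save/torch.load rather than Accelerator.save_state; the simplification is an assumption for illustration, not the accelerate API.

import os
import tempfile

import torch

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "checkpoint_0.pt")
    torch.save({"model": model.state_dict(), "optimizer": optimizer.state_dict()}, path)
    state = torch.load(path)
    model.load_state_dict(state["model"])          # parameters (the a/b analogues) restored
    optimizer.load_state_dict(state["optimizer"])  # Adam moment estimates restored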
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Tuple=None ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = True
while ask_again:
__lowerCamelCase = input(__snake_case )
try:
if default is not None and len(__snake_case ) == 0:
return default
return convert_value(__snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__snake_case )
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=[] , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=0 ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = BulletMenu(__snake_case , __snake_case )
__lowerCamelCase = menu.run(default_choice=__snake_case )
return convert_value(__snake_case ) if convert_value is not None else result
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = int(__snake_case )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = int(__snake_case )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Dict:
"""simple docstring"""
__lowerCamelCase = int(__snake_case )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = int(__snake_case )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = int(__snake_case )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Dict:
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class __lowerCAmelCase ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = super()._format_usage(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCamelCase = usage.replace('<command> [<args>] ' , '' )
return usage
| 90 |
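A small sketch of the index-to-enum converter pattern above, with a plain list standing in for the PrecisionType enum; the values mirror the precision choices in this file, but the function name here is ours.

precision_choices = ["no", "fp16", "bf16", "fp8"]

def convert_mixed_precision(value: str) -> str:
    return precision_choices[int(value)]

assert convert_mixed_precision("1") == "fp16"
assert {"yes": True, "no": False}["No".lower()] is False  # the yes/no converter above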
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 | 0 |
"""simple docstring"""
def lowercase (snake_case__ : Dict ) -> Tuple:
'''simple docstring'''
if not head:
return True
    # split the list into two parts
    lowerCAmelCase , lowerCAmelCase = head.next, head
while fast and fast.next:
lowerCAmelCase = fast.next.next
lowerCAmelCase = slow.next
lowerCAmelCase = slow.next
lowerCAmelCase = None # Don't forget here! But forget still works!
# reverse the second part
lowerCAmelCase = None
while second:
lowerCAmelCase = second.next
lowerCAmelCase = node
lowerCAmelCase = second
lowerCAmelCase = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
lowerCAmelCase = node.next
lowerCAmelCase = head.next
return True
def lowercase (snake_case__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
lowerCAmelCase = head
while fast and fast.next:
        lowerCAmelCase , lowerCAmelCase = fast.next.next, slow.next
# 2. Push the second half into the stack
lowerCAmelCase = [slow.val]
while slow.next:
lowerCAmelCase = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
lowerCAmelCase = cur.next
return True
def lowercase (snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
if not head or not head.next:
return True
lowerCAmelCase = {}
lowerCAmelCase = 0
while head:
if head.val in d:
d[head.val].append(__snake_case )
else:
lowerCAmelCase = [pos]
lowerCAmelCase = head.next
pos += 1
lowerCAmelCase = pos - 1
lowerCAmelCase = 0
for v in d.values():
if len(__snake_case ) % 2 != 0:
middle += 1
else:
lowerCAmelCase = 0
for i in range(0 , len(__snake_case ) ):
if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 155 |
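The three checks above assume a singly linked node with .val and .next but never define one. Below is a minimal node class plus a de-obfuscated version of the stack-based check so the logic can actually be run; the names ListNode and build_list are ours.

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None

def build_list(values):
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head

def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    slow = fast = head
    while fast and fast.next:                  # slow ends at the midpoint
        fast, slow = fast.next.next, slow.next
    stack = [slow.val]
    while slow.next:                           # push the second half onto the stack
        slow = slow.next
        stack.append(slow.val)
    cur = head
    while stack:                               # compare against the first half
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True

assert is_palindrome_stack(build_list([1, 2, 2, 1]))
assert not is_palindrome_stack(build_list([1, 2, 3]))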
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head:
return True
    # split the list into two parts
UpperCAmelCase_ , UpperCAmelCase_ : Any = head.next, head
while fast and fast.next:
UpperCAmelCase_ : str = fast.next.next
UpperCAmelCase_ : Union[str, Any] = slow.next
UpperCAmelCase_ : int = slow.next
UpperCAmelCase_ : List[Any] = None # Don't forget here! But forget still works!
# reverse the second part
UpperCAmelCase_ : Tuple = None
while second:
UpperCAmelCase_ : int = second.next
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Optional[Any] = second
UpperCAmelCase_ : Tuple = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ : Optional[Any] = node.next
UpperCAmelCase_ : Dict = head.next
return True
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ : Any = head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ : List[str] = [slow.val]
while slow.next:
UpperCAmelCase_ : List[str] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ : int = cur.next
return True
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head or not head.next:
return True
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : int = 0
while head:
if head.val in d:
d[head.val].append(__snake_case )
else:
UpperCAmelCase_ : List[Any] = [pos]
UpperCAmelCase_ : Any = head.next
pos += 1
UpperCAmelCase_ : Dict = pos - 1
UpperCAmelCase_ : Optional[int] = 0
for v in d.values():
if len(__snake_case ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ : int = 0
for i in range(0 , len(__snake_case ) ):
if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 29 | 0 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( _snake_case ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''xlm-prophetnet'''
_lowerCamelCase: Dict = ['''past_key_values''']
_lowerCamelCase: List[str] = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : Optional[Any] ,A_ : List[Any] = 0.1 ,A_ : List[Any] = "gelu" ,A_ : Optional[int] = 3_0522 ,A_ : Any = 1024 ,A_ : Union[str, Any] = 4096 ,A_ : Any = 12 ,A_ : Union[str, Any] = 16 ,A_ : Optional[Any] = 4096 ,A_ : List[str] = 12 ,A_ : Tuple = 16 ,A_ : Any = 0.1 ,A_ : Any = 0.1 ,A_ : List[str] = 512 ,A_ : List[Any] = 0.02 ,A_ : Dict = True ,A_ : Optional[int] = True ,A_ : Dict = 0 ,A_ : Optional[Any] = 2 ,A_ : Dict = 32 ,A_ : Tuple = 128 ,A_ : Dict = False ,A_ : Any = 0.0 ,A_ : Any = True ,A_ : List[Any] = 0 ,A_ : List[Any] = 1 ,A_ : Any = 2 ,**A_ : Union[str, Any] ,) -> Tuple:
A = vocab_size
A = hidden_size
A = encoder_ffn_dim
A = num_encoder_layers
A = num_encoder_attention_heads
A = decoder_ffn_dim
A = num_decoder_layers
A = num_decoder_attention_heads
A = max_position_embeddings
A = init_std # Normal(0, this parameter)
A = activation_function
# parameters for xlmprophetnet
A = ngram
A = num_buckets
A = relative_max_distance
A = disable_ngram_loss
A = eps
# 3 Types of Dropout
A = attention_dropout
A = activation_dropout
A = dropout
A = use_cache
super().__init__(
pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,is_encoder_decoder=_UpperCamelCase ,add_cross_attention=_UpperCamelCase ,decoder_start_token_id=_UpperCamelCase ,**_UpperCamelCase ,)
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Dict ) -> int:
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.' )
| 74 |
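A sketch of the derived-property pattern the config above uses for num_hidden_layers: the depth is the sum of encoder and decoder layers, and direct assignment is rejected. TinyConfig is an illustrative stand-in, not a transformers class.

class TinyConfig:
    def __init__(self, num_encoder_layers: int = 12, num_decoder_layers: int = 12):
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value: int) -> None:
        raise NotImplementedError("Set num_encoder_layers and num_decoder_layers instead.")

cfg = TinyConfig()
assert cfg.num_hidden_layers == 24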
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
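A minimal sketch of the deferred-import mechanism _LazyModule builds on: a module-level __getattr__ (PEP 562) that resolves a name from an import map only when first accessed. The map here points at the standard library purely to keep the example runnable.

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Placed at module top level, `from this_module import dumps` would trigger the
# json import lazily; calling the hook directly also works:
assert __getattr__("dumps")({"a": 1}) == '{"a": 1}'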
# Lint as: python3
import itertools
import os
import re
_A : Optional[Any] = re.compile(r'([A-Z]+)([A-Z][a-z])')
_A : str = re.compile(r'([a-z\d])([A-Z])')
_A : Optional[Any] = re.compile(r'(?<!_)_(?!_)')
_A : str = re.compile(r'(_{2,})')
_A : str = r'^\w+(\.\w+)*$'
_A : Optional[Any] = r'<>:/\|?*'
def _a ( UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : List[str] = _uppercase_uppercase_re.sub(R'''\1_\2''' , __snake_case )
lowerCamelCase__ : List[Any] = _lowercase_uppercase_re.sub(R'''\1_\2''' , __snake_case )
return name.lower()
def _a ( UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : int = _single_underscore_re.split(__snake_case )
lowerCamelCase__ : Union[str, Any] = [_multiple_underscores_re.split(__snake_case ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__snake_case ) if n != '''''' )
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
if os.path.basename(__snake_case ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(__snake_case )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
if os.path.basename(__snake_case ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , __snake_case ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(__snake_case )}-{split}"
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> str:
"""simple docstring"""
lowerCamelCase__ : Tuple = filename_prefix_for_split(__snake_case , __snake_case )
if filetype_suffix:
prefix += f".{filetype_suffix}"
lowerCamelCase__ : List[str] = os.path.join(__snake_case , __snake_case )
return f"{filepath}*"
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Tuple = filename_prefix_for_split(__snake_case , __snake_case )
lowerCamelCase__ : List[str] = os.path.join(__snake_case , __snake_case )
if shard_lengths:
lowerCamelCase__ : int = len(__snake_case )
lowerCamelCase__ : Dict = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(__snake_case )]
if filetype_suffix:
lowerCamelCase__ : str = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
lowerCamelCase__ : List[str] = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
| 142 |
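A usage sketch of the shard-naming scheme the helpers above implement, reproduced with plain f-strings; the dataset name, split, and shard count are illustrative, and the real helpers additionally snake-case the dataset name first.

name, split, num_shards, suffix = "my_dataset", "train", 3, "arrow"
prefix = f"{name}-{split}"
filenames = [f"{prefix}-{i:05d}-of-{num_shards:05d}.{suffix}" for i in range(num_shards)]
assert filenames == [
    "my_dataset-train-00000-of-00003.arrow",
    "my_dataset-train-00001-of-00003.arrow",
    "my_dataset-train-00002-of-00003.arrow",
]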
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
UpperCAmelCase__ = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
        UpperCAmelCase_ : Any = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        UpperCAmelCase_ : List[Any] = nn.Conv2d(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
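# Tiled encoding below: the input image is split into overlapping tiles, each
# tile is encoded independently to bound peak memory, and the overlapping
# borders are cross-faded with the blend helpers above so the latents show no
# seams between tiles.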
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
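# Tiled decoding mirrors tiled encoding: the latents are split into
# overlapping tiles, decoded independently, blended along shared borders, and
# cropped to row_limit before being stitched back together.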
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
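
# A minimal standalone sketch of the tile-blending idea used in the blend
# helpers above: two overlapping tiles are cross-faded linearly over
# `blend_extent` rows so no seam remains. The tensor shapes below are
# illustrative assumptions, not values taken from the class above.
import torch

def blend_v(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    # a is the tile above, b the tile below; both are (batch, channels, h, w)
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        # weight shifts linearly from a's bottom rows to b's top rows
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    return b

top = torch.zeros(1, 3, 8, 8)
bottom = torch.ones(1, 3, 8, 8)
blended = blend_v(top, bottom, blend_extent=4)
print(blended[0, 0, :4, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500]) -- a smooth ramp
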
from graphs.minimum_spanning_tree_kruskal import kruskal
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = 9
lowerCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase = kruskal(__snake_case , __snake_case )
lowerCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(__snake_case ) == sorted(__snake_case )
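
# `kruskal` is imported from graphs.minimum_spanning_tree_kruskal and not shown
# in this file; below is a minimal self-contained sketch of what it plausibly
# does, assuming the signature kruskal(num_nodes, edges) with edges given as
# [u, v, weight] triples and the chosen MST edges returned as a list.
def kruskal_sketch(num_nodes, edges):
    edges = sorted(edges, key=lambda e: e[2])  # consider cheapest edges first
    parent = list(range(num_nodes))

    def find(i):  # union-find root lookup with path compression
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    mst = []
    for u, v, w in edges:
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge joins two components, so keep it
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst

print(kruskal_sketch(4, [[0, 1, 3], [1, 2, 1], [0, 2, 2], [2, 3, 4]]))
# -> [[1, 2, 1], [0, 2, 2], [2, 3, 4]]
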
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
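
# A quick usage sketch of the binary-AND helper above, shown with a local
# re-implementation (the parameter names in this file are obfuscated): the
# zero-padded result should agree with Python's built-in & operator.
def binary_and(a: int, b: int) -> str:
    a_binary, b_binary = bin(a)[2:], bin(b)[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(ca == "1" and cb == "1"))
        for ca, cb in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )

assert binary_and(25, 32) == "0b000000"               # both inputs padded to 6 digits
assert int(binary_and(37, 50), 2) == (37 & 50) == 32  # matches the & operator
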
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Namespace ) -> List[str]:
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
A__: Any = '''\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'''
class A__ ( _snake_case ):
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Any ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=_UpperCamelCase , required=_UpperCamelCase , help="""Model\'s type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=_UpperCamelCase , required=_UpperCamelCase , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=_UpperCamelCase , required=_UpperCamelCase , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=_UpperCamelCase , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=_UpperCamelCase , default=_UpperCamelCase , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=_UpperCamelCase )
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , *SCREAMING_SNAKE_CASE :Any , ) -> Union[str, Any]:
'''simple docstring'''
_a : Dict =logging.get_logger("""transformers-cli/converting""" )
self._logger.info(f"Loading model {model_type}" )
_a : Dict =model_type
_a : Optional[int] =tf_checkpoint
_a : Tuple =pytorch_dump_output
_a : int =config
_a : Optional[int] =finetuning_task_name
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
if "ckpt" in self._tf_checkpoint.lower():
_a : Union[str, Any] =self._tf_checkpoint
_a : Tuple =''
else:
_a : List[Any] =self._tf_checkpoint
_a : str =''
convert_transfo_xl_checkpoint_to_pytorch(
_UpperCamelCase , self._config , self._pytorch_dump_output , _UpperCamelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
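
# A small pure-Python illustration of the special-token layout the two methods
# above implement for BERT-style tokenizers: a single sequence becomes
# [CLS] A [SEP]; a pair becomes [CLS] A [SEP] B [SEP], with token_type_ids of 0
# for the first segment and 1 for the second. The ids 101/102 are the
# conventional BERT values, assumed here purely for illustration.
CLS, SEP = 101, 102

def build_inputs(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out

def token_type_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)  # covers [CLS] + A + [SEP]
    return first if ids_b is None else first + [1] * (len(ids_b) + 1)  # B + [SEP]

print(build_inputs([7, 8], [9]))    # [101, 7, 8, 102, 9, 102]
print(token_type_ids([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]
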