| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''Doc-build configuration: notebook install cell and black placeholder patterns.'''

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
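# A hypothetical helper (not part of the original file) showing how a docs build
# step could substitute these placeholders before formatting a snippet with black:
def fill_placeholders(code: str, mapping: dict) -> str:
    # Replace every "{...}" placeholder with its stand-in class name so the
    # snippet parses as valid Python.
    for placeholder, fake_name in mapping.items():
        code = code.replace(placeholder, fake_name)
    return code


print(fill_placeholders("model = {model_class}.from_pretrained(ckpt)", black_avoid_patterns))
# model = FakeModelClass.from_pretrained(ckpt)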
'''Utility that adds a new release to the version table in the docs' custom.js.'''
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in custom.js so it lists `version` as stable."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end of the mapping
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
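# A runnable sketch (hypothetical file contents, inferred from the parsing logic
# above): build a throwaway custom.js, repoint the module-level constant, and run
# the updater.
import pathlib
import tempfile

demo_js = pathlib.Path(tempfile.mkdtemp()) / "custom.js"
demo_js.write_text(
    'const stableVersion = "v4.27.0"\n'
    'const versionMapping = {\n'
    '    "": "v4.27.0",\n'
    '}\n'
)
CUSTOM_JS_FILE = str(demo_js)  # update_custom_js reads this module-level constant
update_custom_js("4.28.0")
print(demo_js.read_text())
# -> stableVersion becomes "v4.28.0" and the mapping gains '"v4.28.0": "v4.28.0",'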
'''Utilities for dataset and filename naming (camel-case/snake-case helpers).'''
# Lint as: python3
import itertools
import os
import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
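# Usage sketch for the helpers above (outputs worked out by hand from the regexes):
print(camelcase_to_snakecase("SomeNewDataset"))    # some_new_dataset
print(snakecase_to_camelcase("some_new_dataset"))  # SomeNewDataset
print(filenames_for_dataset_split(
    "/data", "SomeNewDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
))
# ['/data/some_new_dataset-train-00000-of-00002.arrow',
#  '/data/some_new_dataset-train-00001-of-00002.arrow']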
'''Percentual proximity scoring: rank rows by summed, min-max normalised column scores.'''


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of `source_data` into one list per column."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max score each column; weight 0 means lower is better, weight 1 means higher is better."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Add up the column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append each row's total score to the row and return the table."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
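# Worked example for the scoring pipeline above: weight 0 marks a column where
# lower is better, weight 1 a column where higher is better (output computed by
# hand from the definitions).
vendors = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
print(procentual_proximity(vendors, [0, 0, 1]))
# [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]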
'''Tests for the DisjunctiveConstraint used in constrained generation.'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
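# Hedged usage sketch: at generation time a nested list passed as `force_words_ids`
# is turned into a DisjunctiveConstraint ("generate at least one of these token
# sequences"). Adapted from the transformers constrained-generation examples; the
# exact decoded output will vary.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

force_words_ids = [
    tokenizer(["scream", "screams", "screaming"], add_prefix_space=True, add_special_tokens=False).input_ids
]
input_ids = tokenizer("The child", return_tensors="pt").input_ids
out = model.generate(input_ids, force_words_ids=force_words_ids, num_beams=10, remove_invalid_values=True)
print(tokenizer.decode(out[0], skip_special_tokens=True))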
'''Add two integers using only bitwise operators.'''


def add(first: int, second: int) -> int:
    """
    Implementation of integer addition using bitwise operators.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        c = first & second  # carry bits
        first ^= second     # sum without carry
        second = c << 1     # carry shifted into position
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
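# Worked trace of the carry loop for add(3, 5):
#   iter 1: first=0b011, second=0b101 -> carry=0b001, first=0b110, second=0b010
#   iter 2: first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   iter 3: first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   iter 4: no bits in common -> carry=0, first=0b1000, second=0
assert add(3, 5) == 8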
'''Lazy import structure for the SqueezeBERT model.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
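# The _LazyModule pattern above defers heavy submodule imports until an attribute
# is first accessed. A minimal self-contained sketch of the idea (not the actual
# transformers implementation; assumes the lazy module is installed as a package):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value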
'''Circular queue implementation backed by a fixed-size Python list.'''


class CircularQueue:
    """Circular FIFO queue with a fixed capacity of n elements."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
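# Usage sketch for the queue above:
q = CircularQueue(3)
q.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
print(q.dequeue())  # a
print(q.first())    # b
q.enqueue("d")      # reuses the slot freed by the dequeue (wrap-around)
print(len(q))       # 3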
'''Tests for the text-classification tool.'''
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
'''Kandinsky 2.2 image-to-image decoder pipeline (diffusers).'''
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Round height/width up to the nearest multiple of scale_factor**2, then rescale."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1] CHW float tensor with a batch dim."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Pipeline for image-to-image generation with the Kandinsky 2.2 decoder."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''Approximate sine and cosine with truncated Maclaurin series.'''
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # reduce theta modulo 2*pi so the truncated series stays accurate
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first `accuracy` terms of its Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    # reduce theta modulo 2*pi so the truncated series stays accurate
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))

    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
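# Sanity check of the truncated series against the standard library (tolerance
# chosen loosely; the range reduction above keeps the argument small):
import math

for x in (0.5, 2.0, -3.0, 10.0):
    assert math.isclose(maclaurin_sin(x), math.sin(x), abs_tol=1e-9)
    assert math.isclose(maclaurin_cos(x), math.cos(x), abs_tol=1e-9)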
'''Lazy import structure for the ONNX export utilities.'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''AltCLIP model configuration.'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
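# Usage sketch composing the two sub-configurations defined above (the patch_size
# override is just for illustration; all other values are the defaults):
text_config = AltCLIPTextConfig()
vision_config = AltCLIPVisionConfig(patch_size=16)
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.hidden_size)   # 1024
print(config.vision_config.patch_size)  # 16
print(config.to_dict()["model_type"])   # altclip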
'''Slow test checking FSMT WMT19 models against reference BLEU scores.'''
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device

from utils import calculate_bleu

filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
'''Fast tokenizer for XLNet, backed by HuggingFace tokenizers.'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
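# Sketch of the resulting layouts, using illustrative ids (sep_token_id=4 and
# cls_token_id=3 are an assumption here; XLNet appends <sep>/<cls> at the end
# rather than prepending them):
#
#   build_inputs_with_special_tokens([17, 21, 35], [42, 58])
#     -> [17, 21, 35, 4, 42, 58, 4, 3]   # A <sep> B <sep> <cls>
#   create_token_type_ids_from_sequences([17, 21, 35], [42, 58])
#     -> [0, 0, 0, 0, 1, 1, 1, 2]        # segment ids; 2 marks <cls>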
'''Lazy import structure for the (deprecated) Trajectory Transformer model.'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''Streamlit demo: long-form question answering with ELI5 and Wikipedia.'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
'''Manacher's algorithm: find the longest palindromic substring in linear time.'''


def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of `input_string`."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
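# Usage sketch (outputs verified by hand against the algorithm above):
print(palindromic_string("abbbaba"))           # abbba
print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg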
'''Lazy import structure for the Graphormer model.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
    def _open( self , path , mode = "rb" , **kwargs , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip('/' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out )
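# Minimal usage sketch (assumption: `dataset_info` is a DatasetInfo fetched via
# huggingface_hub; the file name below is hypothetical):
#
#     fs = HfFileSystem(repo_info=dataset_info , token=None )
#     fs.ls('' )                        # files/directories at the repo root
#     with fs.open('data/train.csv' ) as f:
#         payload = f.read()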
| 7 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A: Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline( Pipeline ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> List[Any]:
        super().__init__(*args , **kwargs )
        requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        return {}, {}, {}
    def preprocess( self , image ):
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('uint8' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
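# Usage sketch (the checkpoint name is an assumption; any depth-estimation
# checkpoint supported by this pipeline should behave the same):
#
#     from transformers import pipeline
#     depth_estimator = pipeline('depth-estimation' , model='Intel/dpt-large' )
#     result = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
#     result['depth'].save('depth.png' )  # PIL image; result['predicted_depth'] is the raw tensor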
| 7 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> List[str]:
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ) -> Dict:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        prefix = 'vilt.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
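# Note on the slicing above: timm stores the fused qkv projection as a single
# (3 * hidden_size, hidden_size) matrix, so rows [0:h], [h:2h] and [2h:3h] are
# exactly the query, key and value projections that the HuggingFace layout
# keeps as three separate linear layers.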
def remove_classification_head_( state_dict ) -> Optional[int]:
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ) -> Tuple:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ) -> Optional[Any]:
    """simple docstring"""
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = 'huggingface/label-files'
        filename = 'vqa2-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: 'False', 1: 'True'}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['state_dict']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
if mlm_model or irtr_model:
        ignore_keys = ['itm_score.fc.weight', 'itm_score.fc.bias']
        for k in ignore_keys:
            state_dict.pop(k , None )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
# Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
    processor = ViltProcessor(image_processor , tokenizer )
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=True ).raw )
        image2 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=True ).raw )
        text = (
            'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
            ' standing.'
        )
        encoding_1 = processor(image1 , text , return_tensors='pt' )
        encoding_2 = processor(image2 , text , return_tensors='pt' )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
else:
        image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=True ).raw )
        if mlm_model:
            text = 'a bunch of [MASK] laying on a [MASK].'
        else:
            text = 'How many cats are there?'
        encoding = processor(image , text , return_tensors='pt' )
        outputs = model(**encoding )
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1e-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 1 |
'''simple docstring'''
def sum_digits( num : int ) -> int:
    """simple docstring"""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution( max_n : int = 100 ) -> int:
    """simple docstring"""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
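# Sanity check (Project Euler 65): the numerator of the 100th convergent of the
# continued fraction for e has digit sum 272, so solution() with the default
# max_n should return 272.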
if __name__ == "__main__":
print(f"""{solution() = }""")
| 7 |
'''simple docstring'''
def cocktail_shaker_sort( unsorted : list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j] , unsorted[j - 1] = unsorted[j - 1] , unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j] , unsorted[j + 1] = unsorted[j + 1] , unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
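# Quick worked example: the backward sweep bubbles the smallest remaining value
# to the front and the forward sweep pushes the largest to the back, e.g.
#
#     cocktail_shaker_sort([4, 5, 2, 1, 2])  # -> [1, 2, 2, 4, 5]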
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 1 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder( input_1 : int = 1 , input_2 : int = 1 , carry_in : int = 1 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=1000 )
    return job.result().get_counts(quantum_circuit )
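# Reading the counts: classical bit 0 holds the sum (qubit 2) and bit 1 the
# carry-out (qubit 3), so quantum_full_adder(1, 1, 1) computes 1 + 1 + 1 = 0b11
# and every shot should land in state '11', i.e. roughly {'11': 1000} with the
# default 1000 shots.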
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 7 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['transformers', 'torch', 'note_seq']
    def __init__( self , *args , **kwargs ) -> Dict:
        requires_backends(self , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_config( cls , *args , **kwargs ) -> List[str]:
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ) -> Dict:
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
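# Behaviour sketch: this placeholder exists so that package-level imports
# succeed even when the optional backends are missing; instantiating the class
# (or calling from_config / from_pretrained) raises an ImportError via
# requires_backends asking the user to install transformers, torch and
# note_seq. The class name above is kept obfuscated, as in the rest of this row.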
| 7 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A: List[str] = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ) -> Optional[Any]:
    """simple docstring"""
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters ) -> Union[str, Any]:
    """simple docstring"""
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
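# Tiny worked example of the quantization step (hypothetical toy values): with
# clusters = np.array([[0, 0, 0], [255, 255, 255]]), a pixel [10, 12, 9] maps
# to index 0 (near black) and [250, 251, 249] to index 1 (near white), since
# each pixel is assigned its nearest cluster under squared euclidean distance.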
class ImageGPTImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self , clusters = None , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_normalize = True , do_color_quantize = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}" )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image , data_format = None , ) -> np.ndarray:
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 7 |
'''simple docstring'''
def jaro_winkler( stra : str , strb : str ) -> float:
    """simple docstring"""
    def get_matched_characters( _stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"
        return "".join(matched )
    # matching characters
    matching_1 = get_matched_characters(stra , strb )
    matching_2 = get_matched_characters(strb , stra )
    match_count = len(matching_1 )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1 , matching_2 ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
# common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
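# Worked example: for ("martha", "marhta") all six characters match and the
# h/t pair yields one transposition, so jaro = (1 + 1 + 5/6) / 3 ~= 0.9444;
# with a common prefix of length 3 the Jaro-Winkler score becomes
# 0.9444 + 0.1 * 3 * (1 - 0.9444) ~= 0.9611.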
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_bert_from_pretrained( self ) -> str:
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , BertConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxBertModel )
@slow
    def test_roberta_from_pretrained( self ) -> Optional[int]:
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                config = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(config )
                self.assertIsInstance(config , RobertaConfig )
                model = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(model )
                self.assertIsInstance(model , FlaxRobertaModel )
@slow
    def test_bert_jax_jit( self ) -> Tuple:
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
@slow
    def test_roberta_jax_jit( self ) -> Optional[Any]:
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )
            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ) -> Tuple:
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
            model = FlaxAutoModel.from_pretrained('bert-base' )
    def test_revision_not_found( self ) -> int:
        with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            model = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
    def test_model_file_not_found( self ) -> str:
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
            model = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
    def test_model_from_pt_suggestion( self ) -> Optional[int]:
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
            model = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 7 |
'''simple docstring'''
from __future__ import annotations
def make_matrix( row_size : int = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
# OR.. transpose(reverse_column(matrix))
def rotate_180( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
# OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
# OR.. transpose(reverse_row(matrix))
def transpose( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix( matrix : list[list[int]] ) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i )
if __name__ == "__main__":
A: Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
A: List[Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
A: List[str] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 7 | 1 |
'''simple docstring'''
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def decimal_to_hexadecimal( decimal : float ) -> str:
    """simple docstring"""
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
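# Quick check of the conversion loop (the function name above follows the
# module's purpose; the original identifier was obfuscated):
#
#     decimal_to_hexadecimal(26)    # -> '0x1a'   (26 = 1 * 16 + 10)
#     decimal_to_hexadecimal(-256)  # -> '-0x100'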
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ) -> Optional[int]:
    """simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"Generating {path}" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(lowercase_ )  # the model-card text assembled above (variable name kept from the original)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 1 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 7 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log( folder_path : str ) -> List[Any]:
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params( params ) -> Union[str, Any]:
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'] )
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'] )
        params.global_rank = int(os.environ['RANK'] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'] )
        assert params.node_id == int(os.environ['NODE_RANK'] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
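# Environment contract for the multi-GPU branch above (these are exactly the
# variables the code reads); a hypothetical 2-node x 8-GPU launch would export:
#
#     WORLD_SIZE=16  N_GPU_NODE=8  N_NODES=2  RANK=<0..15>  NODE_RANK=<0 or 1>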
def set_seed( args ) -> Optional[int]:
    """simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 1 |
'''simple docstring'''
def solution( power : int = 1000 ) -> int:
    """simple docstring"""
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
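# Sanity check (Project Euler 16): the digits of 2**1000 sum to 1366, so
# solution() with the default power should return 1366.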
if __name__ == "__main__":
A: Any = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
A: List[Any] = solution(power)
print("Sum of the digits is: ", result)
| 7 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ) -> Any:
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ) -> Optional[Any]:
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ) -> str:
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return value
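# Usage sketch (the helper names above were reconstructed from the calls inside
# their bodies; the environment keys below are hypothetical):
#
#     os.environ['MY_DEBUG_FLAG'] = 'yes'
#     parse_flag_from_env('MY_DEBUG_FLAG' )          # -> True
#     get_int_from_env(['LOCAL_RANK', 'RANK'] , 0 )  # first non-negative value wins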
| 7 | 1 |
'''simple docstring'''
from collections.abc import Callable
def bisection( function : Callable[[float], float] , a : float , b : float ) -> float:
    """simple docstring"""
    start : float = a
    end : float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        mid : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f( x : float ) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
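# Worked example: f has a single real root near x ~= 2.0945515, so
# bisection(f, 1, 1_0_0_0) converges to roughly that value within the 1e-7
# tolerance used above.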
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 7 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union( pred_label , label , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> str:
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Dict:
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Optional[int]:
    """simple docstring"""
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou )
    metrics['mean_accuracy'] = np.nanmean(acc )
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ) -> Tuple:
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 7 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ) -> str:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Any:
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ) -> int:
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Union[str, Any]:
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ) -> Optional[Any]:
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> str:
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ) -> str:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> List[Any]:
        return
@unittest.skip(reason='Bit does not output attentions' )
def lowerCamelCase__ ( self ) -> str:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def lowerCamelCase__ ( self ) -> Any:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def lowerCamelCase__ ( self ) -> Any:
pass
    def test_forward_signature( self ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
    def test_hidden_states_output( self ) -> str:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def lowerCamelCase__ ( self ) -> Tuple:
pass
    def test_for_image_classification( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Dict:
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _UpperCAmelCase ( ) -> str:
"""simple docstring"""
lowercase_ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ) -> Any:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowercase )
lowercase_ : Optional[int] = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : List[Any] = image_processor(images=_lowercase , return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
lowercase_ : Union[str, Any] = model(**_lowercase )
# verify the logits
lowercase_ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
lowercase_ : Dict = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
@require_torch
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (BitBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = BitConfig
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : str = BitModelTester(self )
| 7 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]:
super().__init__(**_lowercase )
lowercase_ : Optional[int] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Any = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : str = qkv_bias
lowercase_ : List[str] = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
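        # absolute tolerance used when validating the ONNX export against the PyTorch outputs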
return 1E-4
| 7 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class __magic_name__ :
"""simple docstring"""
def __init__( self ) -> None:
lowercase_ : List[Any] = [2, 1, 2, -1]
lowercase_ : str = [1, 2, 3, 4]
def lowerCamelCase__ ( self ) -> list[float]:
lowercase_ : Optional[int] = len(self.first_signal )
lowercase_ : List[str] = len(self.second_signal )
lowercase_ : List[str] = max(_lowercase , _lowercase )
# create a zero matrix of max_length x max_length
lowercase_ : Union[str, Any] = [[0] * max_length for i in range(_lowercase )]
        # pad the shorter signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_lowercase ):
lowercase_ : Optional[int] = deque(self.second_signal )
rotated_signal.rotate(_lowercase )
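            # each cyclic rotation of the second signal becomes one row of the circulant matrix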
for j, item in enumerate(_lowercase ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowercase_ : List[str] = np.matmul(np.transpose(_lowercase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_lowercase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 7 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
A: Dict = logging.get_logger(__name__)
set_seed(7_7_0)
A: Optional[Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
A: Dict = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
A: Dict = os.path.dirname(os.path.abspath(__file__))
A: Tuple = os.path.join(os.path.expanduser("~"), ".cache")
A: List[str] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _UpperCAmelCase ( a : List[Any] , a : Union[str, Any]=False ) -> Dict:
"""simple docstring"""
lowercase_ : Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(a , REMOTE_MODEL_PATHS[key]['file_name'] )
def _UpperCAmelCase ( a : List[str] , a : Any ) -> Any:
"""simple docstring"""
os.makedirs(a , exist_ok=a )
hf_hub_download(repo_id=a , filename=a , local_dir=a )
def _UpperCAmelCase ( a : Dict , a : Optional[Any] , a : Optional[int]=False , a : int="text" ) -> Union[str, Any]:
"""simple docstring"""
if model_type == "text":
lowercase_ : Tuple = BarkSemanticModel
lowercase_ : List[str] = BarkSemanticConfig
lowercase_ : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowercase_ : int = BarkCoarseModel
lowercase_ : str = BarkCoarseConfig
lowercase_ : Optional[int] = BarkCoarseGenerationConfig
elif model_type == "fine":
lowercase_ : Dict = BarkFineModel
lowercase_ : Optional[int] = BarkFineConfig
lowercase_ : Optional[int] = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowercase_ : Optional[int] = f"{model_type}_small" if use_small else model_type
lowercase_ : Optional[int] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(a ):
logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['repo_id'] , model_info['file_name'] )
lowercase_ : Any = torch.load(a , map_location=a )
    # this is a hack: older Bark checkpoints store a single 'vocab_size' for both the input and output vocabularies
lowercase_ : Union[str, Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
lowercase_ : Union[str, Any] = model_args['vocab_size']
lowercase_ : str = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowercase_ : List[Any] = model_args.pop('n_head' )
lowercase_ : Optional[int] = model_args.pop('n_embd' )
lowercase_ : Optional[int] = model_args.pop('n_layer' )
lowercase_ : Union[str, Any] = ConfigClass(**checkpoint['model_args'] )
lowercase_ : Union[str, Any] = ModelClass(config=a )
lowercase_ : Any = GenerationConfigClass()
lowercase_ : str = model_generation_config
lowercase_ : List[str] = checkpoint['model']
# fixup checkpoint
lowercase_ : List[str] = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(a ):
# replace part of the key with corresponding layer name in HF implementation
lowercase_ : Dict = k[len(a ) :]
for old_layer_name in new_layer_name_dict:
lowercase_ : Any = new_k.replace(a , new_layer_name_dict[old_layer_name] )
lowercase_ : Dict = state_dict.pop(a )
lowercase_ : List[str] = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowercase_ : List[Any] = {k for k in extra_keys if not k.endswith('.attn.bias' )}
lowercase_ : int = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowercase_ : Union[str, Any] = {k for k in missing_keys if not k.endswith('.attn.bias' )}
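    # '.attn.bias' entries are causal-mask buffers that the HF model rebuilds itself, so they are expected to differ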
if len(a ) != 0:
raise ValueError(f"extra keys found: {extra_keys}" )
if len(a ) != 0:
raise ValueError(f"missing keys: {missing_keys}" )
model.load_state_dict(a , strict=a )
lowercase_ : Optional[int] = model.num_parameters(exclude_embeddings=a )
lowercase_ : List[str] = checkpoint['best_val_loss'].item()
logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(a , 3 )} loss" )
model.eval()
model.to(a )
del checkpoint, state_dict
return model
def _UpperCAmelCase ( a : Any , a : Dict=False , a : List[str]="text" ) -> List[str]:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowercase_ : Tuple = 'cpu' # do conversion on cpu
lowercase_ : Optional[Any] = _get_ckpt_path(a , use_small=a )
lowercase_ : int = _load_model(a , a , model_type=a , use_small=a )
# load bark initial model
lowercase_ : str = _bark_load_model(a , 'cpu' , model_type=a , use_small=a )
if model_type == "text":
lowercase_ : str = bark_model['model']
if model.num_parameters(exclude_embeddings=a ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
lowercase_ : Any = 5
lowercase_ : Optional[Any] = 1_0
if model_type in ["text", "coarse"]:
lowercase_ : Dict = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
lowercase_ : Any = bark_model(a )[0]
lowercase_ : Optional[Any] = model(a )
# take last logits
lowercase_ : Any = output_new_model_total.logits[:, [-1], :]
else:
lowercase_ : Any = 3
lowercase_ : Dict = 8
lowercase_ : Dict = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowercase_ : Any = model(a , a )
lowercase_ : List[Any] = bark_model(a , a )
lowercase_ : Tuple = output_new_model_total.logits
    # any output difference should come from the differing self-attention implementation designs
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
def _UpperCAmelCase ( a : Any , a : Tuple , a : Union[str, Any] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
lowercase_ : str = os.path.join(a , a )
lowercase_ : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(a , 'config.json' ) )
lowercase_ : List[str] = BarkCoarseConfig.from_pretrained(os.path.join(a , 'config.json' ) )
lowercase_ : Optional[Any] = BarkFineConfig.from_pretrained(os.path.join(a , 'config.json' ) )
lowercase_ : str = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
lowercase_ : List[str] = BarkSemanticModel.from_pretrained(a )
lowercase_ : List[Any] = BarkCoarseModel.from_pretrained(a )
lowercase_ : Any = BarkFineModel.from_pretrained(a )
lowercase_ : Any = EncodecModel.from_pretrained('facebook/encodec_24khz' )
lowercase_ : Optional[Any] = BarkConfig.from_sub_model_configs(
a , a , a , a )
lowercase_ : Tuple = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowercase_ : Any = BarkModel(a )
lowercase_ : List[Any] = semantic
lowercase_ : Any = coarseAcoustic
lowercase_ : Optional[Any] = fineAcoustic
lowercase_ : int = codec
lowercase_ : Tuple = bark_generation_config
Path(a ).mkdir(exist_ok=a )
bark.save_pretrained(a , repo_id=a , push_to_hub=a )
if __name__ == "__main__":
A: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
A: Any = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 7 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
| 7 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def _UpperCAmelCase ( a : Sequence[float] , a : bool = False ) -> float:
"""simple docstring"""
if not arr:
return 0
lowercase_ : Dict = 0 if allow_empty_subarrays else float('-inf' )
lowercase_ : str = 0.0
for num in arr:
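        # Kadane's step: either extend the running subarray or restart at the current element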
lowercase_ : int = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowercase_ : Tuple = max(a , a )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
A: int = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 7 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
A: List[str] = parser.parse_args()
update_custom_js(args.version)
| 7 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def _UpperCAmelCase ( a : int ) -> datetime:
"""simple docstring"""
lowercase_ : str = year % 1_9
lowercase_ : Optional[int] = year % 4
lowercase_ : int = year % 7
lowercase_ : int = math.floor(year / 1_0_0 )
lowercase_ : Optional[int] = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
lowercase_ : List[Any] = leap_day_inhibits / 4
lowercase_ : Union[str, Any] = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
lowercase_ : List[str] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
lowercase_ : List[str] = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
lowercase_ : Union[str, Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
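    # Gauss's two exception rules replace the impossible late dates (April 26 / April 25) with April 19 / April 18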
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(a , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(a , 4 , 1_8 )
else:
return datetime(a , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
A: Optional[Any] = "will be" if year > datetime.now().year else "was"
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(a ):
if len(a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a ) )
return data_lists
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for dlist, weight in zip(a , a ):
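        # min-max normalize each column to [0, 1]; weight 0 means "lower is better", weight 1 means "higher is better"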
lowercase_ : Tuple = min(a )
lowercase_ : Any = max(a )
lowercase_ : list[float] = []
        # for weight 0, the score is 1 minus the normalized value (lower raw values score higher)
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowercase_ : str = f"Invalid weight of {weight:f} provided"
raise ValueError(a )
score_lists.append(a )
return score_lists
def _UpperCAmelCase ( a : list[list[float]] ) -> list[float]:
"""simple docstring"""
lowercase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a ):
lowercase_ : List[Any] = final_scores[j] + ele
return final_scores
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : int = get_data(a )
lowercase_ : Optional[int] = calculate_each_score(a , a )
lowercase_ : Dict = generate_final_scores(a )
# append scores to source data
for i, ele in enumerate(a ):
source_data[i].append(a )
return source_data
| 7 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A: List[str] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: str = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
A: Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : int , a : int ) -> int:
"""simple docstring"""
while second != 0:
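        # AND extracts the carry bits, XOR adds without carry,
        # and the left shift feeds the carry back in for the next round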
lowercase_ : Any = first & second
first ^= second
lowercase_ : List[str] = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 |
'''simple docstring'''
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = n
lowercase_ : Dict = [None] * self.n
lowercase_ : Tuple = 0 # index of the first element
lowercase_ : List[Any] = 0
lowercase_ : List[Any] = 0
def __len__( self ) -> int:
return self.size
def lowerCamelCase__ ( self ) -> bool:
return self.size == 0
def lowerCamelCase__ ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def lowerCamelCase__ ( self , _lowercase ) -> Any:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
lowercase_ : Tuple = data
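        # advance the rear pointer with wrap-around (modulo the fixed capacity)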
lowercase_ : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCamelCase__ ( self ) -> Any:
if self.size == 0:
raise Exception('UNDERFLOW' )
lowercase_ : Dict = self.array[self.front]
lowercase_ : Tuple = None
lowercase_ : int = (self.front + 1) % self.n
self.size -= 1
return temp
| 7 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'bert-generation'
def __init__( self , _lowercase=5_0358 , _lowercase=1024 , _lowercase=24 , _lowercase=16 , _lowercase=4096 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=0 , _lowercase=2 , _lowercase=1 , _lowercase="absolute" , _lowercase=True , **_lowercase , ) -> Optional[Any]:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
lowercase_ : Any = vocab_size
lowercase_ : List[Any] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Dict = hidden_act
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : str = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : str = initializer_range
lowercase_ : Tuple = layer_norm_eps
lowercase_ : Dict = position_embedding_type
lowercase_ : int = use_cache
| 7 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
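                # classifier-free guidance: push the prediction from the unconditional output toward the conditioned one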
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ShapEPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = ['prompt']
SCREAMING_SNAKE_CASE_ : Tuple = ['prompt']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE_ : int = False
@property
def lowerCamelCase__ ( self ) -> Dict:
return 32
@property
def lowerCamelCase__ ( self ) -> int:
return 32
@property
def lowerCamelCase__ ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ) -> str:
return 8
@property
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCamelCase__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_lowercase )
@property
def lowerCamelCase__ ( self ) -> Any:
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
lowercase_ : Union[str, Any] = PriorTransformer(**_lowercase )
return model
@property
def lowerCamelCase__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowercase_ : List[str] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
lowercase_ : Dict = ShapERenderer(**_lowercase )
return model
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Optional[int] = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_text_encoder
lowercase_ : Optional[int] = self.dummy_tokenizer
lowercase_ : List[Any] = self.dummy_renderer
lowercase_ : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_lowercase , clip_sample=_lowercase , clip_sample_range=1.0 , )
lowercase_ : str = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowerCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Tuple:
if str(_lowercase ).startswith('mps' ):
lowercase_ : Union[str, Any] = torch.manual_seed(_lowercase )
else:
lowercase_ : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase_ : Union[str, Any] = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[Any] = 'cpu'
lowercase_ : List[Any] = self.get_dummy_components()
lowercase_ : Optional[Any] = self.pipeline_class(**_lowercase )
lowercase_ : List[str] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : Dict = pipe(**self.get_dummy_inputs(_lowercase ) )
lowercase_ : Dict = output.images[0]
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase_ : int = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ) -> List[str]:
        # NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : str = torch_device == 'cpu'
lowercase_ : str = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowercase , relax_max_difference=_lowercase , )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : int = self.get_dummy_components()
lowercase_ : Any = self.pipeline_class(**_lowercase )
lowercase_ : Optional[int] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : List[Any] = 1
lowercase_ : int = 2
lowercase_ : Union[str, Any] = self.get_dummy_inputs(_lowercase )
for key in inputs.keys():
if key in self.batch_params:
lowercase_ : List[Any] = batch_size * [inputs[key]]
lowercase_ : Tuple = pipe(**_lowercase , num_images_per_prompt=_lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
lowercase_ : int = ShapEPipeline.from_pretrained('openai/shap-e' )
lowercase_ : str = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
lowercase_ : Union[str, Any] = pipe(
'a shark' , generator=_lowercase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
A: List[str] = parser.parse_args()
update_custom_js(args.version)
| 7 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
| 7 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A: Optional[Any] = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'albert'
def __init__( self , _lowercase=3_0000 , _lowercase=128 , _lowercase=4096 , _lowercase=12 , _lowercase=1 , _lowercase=64 , _lowercase=1_6384 , _lowercase=1 , _lowercase="gelu_new" , _lowercase=0 , _lowercase=0 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=0.1 , _lowercase="absolute" , _lowercase=0 , _lowercase=2 , _lowercase=3 , **_lowercase , ) -> Any:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
lowercase_ : Dict = vocab_size
lowercase_ : List[str] = embedding_size
lowercase_ : Any = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Tuple = num_hidden_groups
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : int = inner_group_num
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : Tuple = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Dict = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : List[str] = classifier_dropout_prob
lowercase_ : Optional[int] = position_embedding_type
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowercase_ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = StableUnCLIPPipeline
SCREAMING_SNAKE_CASE_ : int = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Optional[Any] = 32
lowercase_ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase_ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowercase_ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
lowercase_ : Optional[int] = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
lowercase_ : Tuple = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
lowercase_ : Dict = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ : Optional[Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
lowercase_ : Optional[int] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ : str = AutoencoderKL()
lowercase_ : Optional[Any] = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def lowerCamelCase__ ( self , _lowercase , _lowercase=0 ) -> List[str]:
if str(_lowercase ).startswith('mps' ):
lowercase_ : Tuple = torch.manual_seed(_lowercase )
else:
lowercase_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Any = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Tuple = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
lowercase_ : Optional[Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
        lowercase_ : Optional[int] = pipe('anime turtle' , generator=_lowercase , output_type='np' )
lowercase_ : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ : List[Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
lowercase_ : List[Any] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ : Optional[Any] = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
lowercase_ : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
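        # the attention slicing and sequential CPU offload enabled above are the
        # memory savings that keep the fp16 pipeline's peak allocation under
        # this 7 GB bound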
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
lowercase_ : Dict = ''
lowercase_ : Any = ''
    # append each character + "|" to new_input_string for range(0, length - 1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found palindromic
    # substring that reaches furthest to the right
lowercase_ , lowercase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase_ : List[Any] = [1 for i in range(len(a ) )]
    # for each center in new_input_string, find the longest palindrome around it
lowercase_ : Dict = 0
for j in range(len(a ) ):
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
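# hedged usage sketch (illustrative values, not part of the original module):
# "abracadabra" is transformed to "a|b|r|a|c|a|d|a|b|r|a"; the first maximal
# expansion is centered on "c", so the recovered substring is "aca"
#
#     _UpperCAmelCase("abracadabra")  # -> 'aca'
#     _UpperCAmelCase("aba")          # -> 'aba'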
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
A: Optional[int] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
A: Union[str, Any] = (
subprocess.check_output(f"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("utf-8").split()
)
A: Tuple = "|".join(sys.argv[1:])
A: List[Any] = re.compile(rf"""^({joined_dirs}).*?\.py$""")
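# hedged example (illustration only): invoked as
#   python ./utils/get_modified_files.py utils src
# joined_dirs is "utils|src", so the regex keeps modified paths such as
# "src/foo/bar.py" and drops e.g. "docs/index.rst"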
A: int = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 7 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
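# hedged usage sketch (the repo layout is an assumption for illustration): with
# repo_info.siblings containing "data/train.csv", _get_dirs caches both the
# file and its parent directory "data", so ls("data") with detail=False returns
# ["data/train.csv"], while info() on an uncached path raises FileNotFoundError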
| 7 | 1 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Union[str, Any] = logging.get_logger(__name__)
A: Optional[int] = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 'mvp'
SCREAMING_SNAKE_CASE_ : str = ['past_key_values']
SCREAMING_SNAKE_CASE_ : List[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _lowercase=5_0267 , _lowercase=1024 , _lowercase=12 , _lowercase=4096 , _lowercase=16 , _lowercase=12 , _lowercase=4096 , _lowercase=16 , _lowercase=0.0 , _lowercase=0.0 , _lowercase="gelu" , _lowercase=1024 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=0.0 , _lowercase=False , _lowercase=True , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=True , _lowercase=2 , _lowercase=2 , _lowercase=False , _lowercase=100 , _lowercase=800 , **_lowercase , ) -> Union[str, Any]:
lowercase_ : Optional[Any] = vocab_size
lowercase_ : List[str] = max_position_embeddings
lowercase_ : str = d_model
lowercase_ : Any = encoder_ffn_dim
lowercase_ : Dict = encoder_layers
lowercase_ : int = encoder_attention_heads
lowercase_ : Any = decoder_ffn_dim
lowercase_ : List[Any] = decoder_layers
lowercase_ : Union[str, Any] = decoder_attention_heads
lowercase_ : List[Any] = dropout
lowercase_ : List[str] = attention_dropout
lowercase_ : int = activation_dropout
lowercase_ : List[str] = activation_function
lowercase_ : int = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : str = decoder_layerdrop
lowercase_ : List[str] = classifier_dropout
lowercase_ : Any = use_cache
lowercase_ : List[Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : int = use_prompt
lowercase_ : List[str] = prompt_length
lowercase_ : List[str] = prompt_mid_dim
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _lowercase ):
lowercase_ : Any = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'The config can simply be saved and uploaded again to be fixed.' )
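# hedged note (illustration): passing the legacy kwarg
# force_bos_token_to_be_generated=True without setting forced_bos_token_id
# makes the config fall back to bos_token_id and emit the warning above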
| 7 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
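# hedged illustration of the slicing above (shapes inferred from the code, not
# stated in the source): for hidden_size H the fused qkv weight has shape
# (3H, H); rows [0:H] become the query projection, rows [H:2H] the key
# projection, rows [2H:3H] the value projection, and the (3H,) bias is split
# the same way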
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Tuple = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
A: str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
A: Optional[Any] = {"facebook/blenderbot_small-90M": 5_1_2}
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Tuple = set()
lowercase_ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : Union[str, Any] = char
lowercase_ : List[Any] = set(a )
return pairs
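# hedged example (illustrative word): for the symbol tuple
# ("l", "o", "w", "</w>") the function above returns the adjacent pairs
# {("l", "o"), ("o", "w"), ("w", "</w>")}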
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[Any] = ['input_ids', 'attention_mask']
def __init__( self , _lowercase , _lowercase , _lowercase="__start__" , _lowercase="__end__" , _lowercase="__unk__" , _lowercase="__null__" , **_lowercase , ) -> Any:
super().__init__(unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , pad_token=_lowercase , **_lowercase )
with open(_lowercase , encoding='utf-8' ) as vocab_handle:
lowercase_ : int = json.load(_lowercase )
lowercase_ : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(_lowercase , encoding='utf-8' ) as merges_handle:
lowercase_ : int = merges_handle.read().split('\n' )[1:-1]
lowercase_ : Any = [tuple(merge.split() ) for merge in merges]
lowercase_ : Optional[int] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
lowercase_ : Union[str, Any] = {}
@property
def lowerCamelCase__ ( self ) -> int:
return len(self.encoder )
def lowerCamelCase__ ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self , _lowercase ) -> str:
if token in self.cache:
return self.cache[token]
lowercase_ : Tuple = re.sub('([.,!?()])' , r' \1' , _lowercase )
lowercase_ : Union[str, Any] = re.sub('(\')' , r' \1 ' , _lowercase )
lowercase_ : Dict = re.sub(r'\s{2,}' , ' ' , _lowercase )
if "\n" in token:
lowercase_ : str = token.replace('\n' , ' __newln__' )
lowercase_ : Tuple = token.split(' ' )
lowercase_ : int = []
for token in tokens:
if not len(_lowercase ):
continue
lowercase_ : Dict = token.lower()
lowercase_ : Optional[Any] = tuple(_lowercase )
lowercase_ : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
lowercase_ : int = get_pairs(_lowercase )
if not pairs:
words.append(_lowercase )
continue
while True:
lowercase_ : List[Any] = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : Any = bigram
lowercase_ : int = []
lowercase_ : Dict = 0
while i < len(_lowercase ):
try:
lowercase_ : Optional[int] = word.index(_lowercase , _lowercase )
new_word.extend(word[i:j] )
lowercase_ : List[str] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : Tuple = tuple(_lowercase )
lowercase_ : Union[str, Any] = new_word
if len(_lowercase ) == 1:
break
else:
lowercase_ : List[Any] = get_pairs(_lowercase )
lowercase_ : Dict = '@@ '.join(_lowercase )
lowercase_ : int = word[:-4]
lowercase_ : Tuple = word
words.append(_lowercase )
return " ".join(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
lowercase_ : str = []
lowercase_ : Union[str, Any] = re.findall(r'\S+\n?' , _lowercase )
for token in words:
split_tokens.extend(list(self.bpe(_lowercase ).split(' ' ) ) )
return split_tokens
def lowerCamelCase__ ( self , _lowercase ) -> int:
lowercase_ : str = token.lower()
return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self , _lowercase ) -> str:
return self.decoder.get(_lowercase , self.unk_token )
def lowerCamelCase__ ( self , _lowercase ) -> str:
lowercase_ : str = ' '.join(_lowercase ).replace('@@ ' , '' ).strip()
return out_string
def lowerCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase_ : int = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : List[str] = os.path.join(
_lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '\n' )
lowercase_ : Union[str, Any] = 0
with open(_lowercase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : _lowercase[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
lowercase_ : Optional[int] = token_index
writer.write(' '.join(_lowercase ) + '\n' )
index += 1
return vocab_file, merge_file
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
        for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
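# note: the backward pass sinks the smallest remaining value toward the front,
# the forward pass floats the largest toward the back, and a full sweep with no
# swap means the list is already sorted, hence the early break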
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( a : int ) -> bool:
"""simple docstring"""
    lowercase_ : Union[str, Any] = round(n ** (1 / 3) )
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 7 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 1 |
'''simple docstring'''
import math
def _UpperCAmelCase ( a : float , a : float ) -> float:
"""simple docstring"""
if (
not isinstance(a , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def _UpperCAmelCase ( a : float , a : float ) -> float:
"""simple docstring"""
if (
not isinstance(a , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
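# hedged worked example: 100 VA of apparent power at power factor 0.8 splits
# into 100 * 0.8 = 80 W of real power and 100 * sqrt(1 - 0.8**2) = 60 VAR of
# reactive power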
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : str , a : str ) -> float:
"""simple docstring"""
def get_matched_characters(a : str , a : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowercase_ : Optional[int] = int(max(0 , i - limit ) )
lowercase_ : Union[str, Any] = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(a )
lowercase_ : Union[str, Any] = f"{_stra[0:_stra.index(a )]} {_stra[_stra.index(a ) + 1:]}"
return "".join(a )
# matching characters
lowercase_ : Union[str, Any] = get_matched_characters(a , a )
lowercase_ : Optional[Any] = get_matched_characters(a , a )
lowercase_ : Optional[int] = len(a )
# transposition
lowercase_ : Dict = (
len([(ca, ca) for ca, ca in zip(a , a ) if ca != ca] ) // 2
)
if not match_count:
lowercase_ : List[str] = 0.0
else:
lowercase_ : Any = (
1
/ 3
* (
match_count / len(a )
+ match_count / len(a )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowercase_ : Optional[Any] = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
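# hedged worked example of the formula above (textbook values, not computed by
# this module): with match_count 6 over two length-6 strings and one
# transposition, jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.944; a 3-character common
# prefix then gives 0.944 + 0.1 * 3 * (1 - 0.944) ~= 0.961 for "martha" vs
# "marhta"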
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 1 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , _lowercase=2 , _lowercase=99 , _lowercase=0 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=2 , _lowercase=4 , _lowercase="last" , _lowercase=True , _lowercase=None , _lowercase=0 , ) -> Tuple:
lowercase_ : Tuple = parent
lowercase_ : Optional[Any] = batch_size
lowercase_ : Tuple = seq_length
lowercase_ : Optional[int] = is_training
lowercase_ : Tuple = use_input_lengths
lowercase_ : int = use_token_type_ids
lowercase_ : int = use_labels
lowercase_ : Union[str, Any] = gelu_activation
lowercase_ : Union[str, Any] = sinusoidal_embeddings
lowercase_ : Dict = causal
lowercase_ : Tuple = asm
lowercase_ : Union[str, Any] = n_langs
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : Union[str, Any] = n_special
lowercase_ : Optional[Any] = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : Tuple = type_sequence_label_size
lowercase_ : str = initializer_range
lowercase_ : str = num_labels
lowercase_ : Tuple = num_choices
lowercase_ : Optional[Any] = summary_type
lowercase_ : Dict = use_proj
lowercase_ : str = scope
lowercase_ : int = bos_token_id
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Optional[Any] = None
if self.use_input_lengths:
lowercase_ : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase_ : Dict = None
if self.use_token_type_ids:
lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase_ : Union[str, Any] = None
lowercase_ : str = None
lowercase_ : Union[str, Any] = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Dict = ids_tensor([self.batch_size] , 2 ).float()
lowercase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ ( self ) -> Optional[Any]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> int:
lowercase_ : Any = XLMModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[str] = model(_lowercase , lengths=_lowercase , langs=_lowercase )
lowercase_ : Tuple = model(_lowercase , langs=_lowercase )
lowercase_ : Tuple = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
lowercase_ : int = XLMWithLMHeadModel(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[int] = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Tuple:
lowercase_ : Union[str, Any] = XLMForQuestionAnsweringSimple(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : str = model(_lowercase )
lowercase_ : str = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
lowercase_ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> int:
lowercase_ : int = XLMForQuestionAnswering(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(_lowercase )
lowercase_ : Any = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , p_mask=_lowercase , )
lowercase_ : Any = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , )
((lowercase_) , ) : str = result_with_labels.to_tuple()
lowercase_ : Tuple = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
((lowercase_) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[Any]:
lowercase_ : str = XLMForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : int = model(_lowercase )
lowercase_ : Any = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[Any]:
lowercase_ : int = self.num_labels
lowercase_ : Any = XLMForTokenClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Dict = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Optional[Any]:
lowercase_ : List[Any] = self.num_choices
lowercase_ : int = XLMForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : str = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Tuple = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : List[str] = config_and_inputs
lowercase_ : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : int = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE_ : Optional[int] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Optional[Any]:
lowercase_ : Dict = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
lowercase_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Optional[Any] = XLMModelTester(self )
lowercase_ : Tuple = ConfigTester(self , config_class=_lowercase , emb_dim=37 )
def lowerCamelCase__ ( self ) -> str:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_lowercase )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=False , _lowercase=1 ) -> int:
self.assertIsInstance(_lowercase , _lowercase )
self.assertListEqual(
[isinstance(_lowercase , _lowercase ) for iter_attentions in attentions] , [True] * len(_lowercase ) )
self.assertEqual(len(_lowercase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_lowercase ):
# adds PAD dummy token
lowercase_ : Optional[int] = min_length + idx + 1
lowercase_ : Optional[Any] = min_length + idx + 1
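            # hedged note: at step idx the sequence (plus the PAD dummy noted
            # above) has min_length + idx + 1 tokens, so target and source
            # attention lengths match that value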
lowercase_ : int = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_lowercase ) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=False , _lowercase=1 ) -> Any:
self.assertIsInstance(_lowercase , _lowercase )
self.assertListEqual(
[isinstance(_lowercase , _lowercase ) for iter_hidden_states in hidden_states] , [True] * len(_lowercase ) , )
self.assertEqual(len(_lowercase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_lowercase ):
# adds PAD dummy token
lowercase_ : Union[str, Any] = min_length + idx + 1
lowercase_ : Tuple = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_lowercase ) , )
pass
@slow
def lowerCamelCase__ ( self ) -> int:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Optional[Any] = XLMModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(_lowercase )
lowercase_ : List[Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=_lowercase ) # the president
lowercase_ : int = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase_ : Optional[int] = model.generate(_lowercase , do_sample=_lowercase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _lowercase )
| 7 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( a : int = 4 ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : Tuple = abs(a ) or 4
return [[1 + x + y * row_size for x in range(a )] for y in range(a )]
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(a ) )
# OR.. transpose(reverse_column(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(a ) )
# OR.. reverse_column(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(a ) )
# OR.. transpose(reverse_row(matrix))
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
    lowercase_ : Any = [list(x ) for x in zip(*a )]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : List[str] = matrix[::-1]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase_ : str = [x[::-1] for x in matrix]
return matrix
def _UpperCAmelCase ( a : list[list[int]] ) -> None:
"""simple docstring"""
for i in matrix:
print(*a )
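# hedged worked example (illustration only): for [[1, 2], [3, 4]], transpose
# yields [[1, 3], [2, 4]] and reverse_row then yields [[2, 4], [1, 3]], i.e.
# the 90-degree counterclockwise rotation printed by the demo below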
if __name__ == "__main__":
A: Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
A: List[Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
A: List[str] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
| 7 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=64 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> str:
lowercase_ : str = parent
lowercase_ : Optional[int] = batch_size
lowercase_ : Tuple = seq_length
lowercase_ : str = is_training
lowercase_ : Tuple = use_input_mask
lowercase_ : Tuple = use_token_type_ids
lowercase_ : int = use_labels
lowercase_ : Tuple = vocab_size
lowercase_ : List[str] = hidden_size
lowercase_ : Any = embedding_size
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : str = type_vocab_size
lowercase_ : int = type_sequence_label_size
lowercase_ : List[str] = initializer_range
lowercase_ : Union[str, Any] = num_labels
lowercase_ : List[str] = num_choices
lowercase_ : Dict = scope
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Union[str, Any] = None
if self.use_input_mask:
lowercase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[str] = None
if self.use_token_type_ids:
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Tuple = None
lowercase_ : Any = None
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ) -> List[str]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
lowercase_ : Union[str, Any] = MegatronBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Tuple = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
lowercase_ : List[str] = model(_lowercase , token_type_ids=_lowercase )
lowercase_ : List[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
lowercase_ : Optional[Any] = MegatronBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
lowercase_ : Dict = MegatronBertForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Optional[Any] = MegatronBertForNextSentencePrediction(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Dict = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
lowercase_ : Tuple = MegatronBertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Union[str, Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , next_sentence_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
lowercase_ : Optional[Any] = MegatronBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : List[str] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
lowercase_ : Dict = self.num_labels
lowercase_ : Optional[int] = MegatronBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : str = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : str = self.num_labels
lowercase_ : List[str] = MegatronBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : Optional[int] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = self.num_choices
lowercase_ : Tuple = MegatronBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase_ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : Tuple = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Tuple = config_and_inputs
lowercase_ : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : List[str] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Any = True
# test_resize_embeddings = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Dict:
lowercase_ : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
lowercase_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
lowercase_ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Optional[Any] = MegatronBertModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def lowerCamelCase__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowercase )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowercase )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowercase )
def _long_tensor ( a : List[Any] ) -> List[Any]:
    """simple docstring"""
    # builds the token tensor on `torch_device`, which is assumed to be imported
    # from transformers.testing_utils alongside the decorators used below
    return torch.tensor(
        a , dtype=torch.long , device=torch_device , )
A: Any = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.' )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Optional[int] = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
lowercase_ : Optional[Any] = os.path.join(os.environ['MYDIR'] , _lowercase )
lowercase_ : List[str] = MegatronBertModel.from_pretrained(_lowercase )
model.to(_lowercase )
model.half()
lowercase_ : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
lowercase_ : List[Any] = model(_lowercase )[0]
lowercase_ : int = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , _lowercase )
lowercase_ : Union[str, Any] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
lowercase_ : int = output[0, ii, jj]
lowercase_ : Any = expected[3 * ii + jj]
lowercase_ : int = 'ii={} jj={} a={} b={}'.format(_lowercase , _lowercase , _lowercase , _lowercase )
self.assertTrue(math.isclose(_lowercase , _lowercase , rel_tol=_lowercase , abs_tol=_lowercase ) , msg=_lowercase )
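# Hedged sketch (added, not part of the original test): the nested math.isclose
# loop above can be collapsed into one torch.allclose call over the 3x3 window
# the test inspects; `output` and `expected` mirror the locals built above.
def _close_to_expected(output , expected , tol=1E-4 ) -> bool:
    expected_t = torch.tensor(expected , dtype=torch.float32 ).reshape(3 , 3 )
    return torch.allclose(output[0, :3, :3].float().cpu() , expected_t , rtol=tol , atol=tol )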
| 7 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang ):
    """simple docstring"""
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, oder?',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
        'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
        'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
        'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
    }
    pair = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"Generating {path}" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(lowercase_ )  # the model card text assembled above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    model_type, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Union[str, Any] = logging.get_logger(__name__)
A: Optional[Any] = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'rwkv'
SCREAMING_SNAKE_CASE_ : int = {'max_position_embeddings': 'context_length'}
def __init__( self , _lowercase=5_0277 , _lowercase=1024 , _lowercase=4096 , _lowercase=32 , _lowercase=None , _lowercase=None , _lowercase=1E-5 , _lowercase=0 , _lowercase=0 , _lowercase=6 , _lowercase=False , _lowercase=True , **_lowercase , ) -> int:
lowercase_ : Any = vocab_size
lowercase_ : List[str] = context_length
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase_ : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Any = rescale_every
lowercase_ : List[str] = use_cache
lowercase_ : Dict = bos_token_id
lowercase_ : List[str] = eos_token_id
super().__init__(
tie_word_embeddings=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
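# Note (added): through PretrainedConfig's `attribute_map` declared above,
# reading or writing `max_position_embeddings` on a config instance
# transparently aliases the stored `context_length` value.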
| 7 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
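# Illustrative environment (added; the numbers are hypothetical): for a
# 2-node x 4-GPU job, a distributed launcher exports, per process,
#   WORLD_SIZE=8  N_GPU_NODE=4  N_NODES=2  NODE_RANK=<0..1>  RANK=<0..7>
# so that world_size // n_gpu_per_node == 2 nodes, matching the assertions above.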
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    """simple docstring"""
    def _check_no_duplicates_on_constructed_node ( self , node ):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}" )
    def construct_mapping ( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme ( readme_content : str ) -> Tuple[Optional[str], str]:
    """simple docstring"""
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = '\n'.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
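# Worked example (added): for a README whose text begins
#   ---
#   license: mit
#   ---
#   Body text
# splitlines() yields ['---', 'license: mit', '---', 'Body text'], sep_idx
# resolves to 2, and the function returns ('license: mit', 'Body text').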
class DatasetMetadata ( dict ):
    """simple docstring"""
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata
@classmethod
    def from_readme ( cls , path ) -> "DatasetMetadata":
        with open(path , encoding='utf-8' ) as readme_file:
            yaml_string, content = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme ( self , path ):
        if path.exists():
            with open(path , encoding='utf-8' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path , 'w' , encoding='utf-8' ) as readme_file:
            readme_file.write(updated_readme_content )
    def _to_readme ( self , readme_content = None ) -> str:
        if readme_content is not None:
            yaml_string, content = _split_yaml_from_readme(readme_content )
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content
@classmethod
    def from_yaml_string ( cls , string ) -> "DatasetMetadata":
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string ( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding='utf-8' , ).decode('utf-8' )
A: Union[str, Any] = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 7 |
'''simple docstring'''
import os
from distutils.util import strtobool
def _UpperCAmelCase ( env_keys , default ):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
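# Usage sketch (added; the env var names are hypothetical): with WORLD_SIZE=8
# exported, calling the helper above with (['LOCAL_WORLD_SIZE', 'WORLD_SIZE'], 1)
# scans the keys in order and returns 8; with neither set, the default 1 is used.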
def _UpperCAmelCase ( key , default=False ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def _UpperCAmelCase ( key , default="no" ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return value
| 7 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
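# Usage sketch (added): a 2x3 batch of uniform floats scaled into [0, 2).
def _floats_list_demo() -> list:
    return floats_list((2, 3) , scale=2.0 )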
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=400 , _lowercase=2000 , _lowercase=1 , _lowercase=0.0 , _lowercase=1_6000 , _lowercase=True , _lowercase=True , ) -> Optional[Any]:
lowercase_ : Dict = parent
lowercase_ : int = batch_size
lowercase_ : Tuple = min_seq_length
lowercase_ : Dict = max_seq_length
lowercase_ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase_ : str = feature_size
lowercase_ : int = padding_value
lowercase_ : Optional[Any] = sampling_rate
lowercase_ : Dict = return_attention_mask
lowercase_ : Any = do_normalize
def lowerCamelCase__ ( self ) -> List[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase__ ( self , _lowercase=False , _lowercase=False ) -> Any:
def _flatten(_lowercase ):
return list(itertools.chain(*_lowercase ) )
if equal_length:
lowercase_ : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowercase_ : Optional[Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase_ : List[Any] = [np.asarray(_lowercase ) for x in speech_inputs]
return speech_inputs
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = WavaVecaFeatureExtractor
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = WavaVecaFeatureExtractionTester(self )
def lowerCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(_lowercase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowercase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCamelCase__ ( self ) -> Any:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Optional[Any] = [np.asarray(_lowercase ) for speech_input in speech_inputs]
# Test not batched input
lowercase_ : List[str] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowercase_ : int = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# Test batched
lowercase_ : Dict = feat_extract(_lowercase , return_tensors='np' ).input_values
lowercase_ : List[Any] = feat_extract(_lowercase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase_ : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase_ : str = np.asarray(_lowercase )
lowercase_ : Optional[int] = feat_extract(_lowercase , return_tensors='np' ).input_values
lowercase_ : Dict = feat_extract(_lowercase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Dict = ['longest', 'max_length', 'do_not_pad']
lowercase_ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(_lowercase , _lowercase ):
lowercase_ : List[str] = feat_extract(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors='np' )
lowercase_ : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[1][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : Any = range(800 , 1400 , 200 )
lowercase_ : Optional[int] = [floats_list((1, x) )[0] for x in lengths]
lowercase_ : int = ['longest', 'max_length', 'do_not_pad']
lowercase_ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(_lowercase , _lowercase ):
lowercase_ : Optional[Any] = feat_extract(_lowercase , max_length=_lowercase , padding=_lowercase )
lowercase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Optional[int] = feat_extract(
_lowercase , truncation=_lowercase , max_length=1000 , padding='max_length' , return_tensors='np' )
lowercase_ : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Optional[int] = feat_extract(
_lowercase , truncation=_lowercase , max_length=1000 , padding='longest' , return_tensors='np' )
lowercase_ : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowercase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : List[str] = feat_extract(
_lowercase , truncation=_lowercase , max_length=2000 , padding='longest' , return_tensors='np' )
lowercase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def lowerCamelCase__ ( self ) -> Dict:
import torch
lowercase_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase_ : Dict = np.random.rand(100 ).astype(np.floataa )
lowercase_ : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase_ : List[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowercase_ : List[str] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def lowerCamelCase__ ( self ) -> str:
        # this test makes sure that models using group norm don't have their
        # feature extractor return the attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowercase_ : Optional[Any] = WavaVecaConfig.from_pretrained(_lowercase )
lowercase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
| 7 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union ( pred_label , label , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
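# Worked example (added): with num_labels=2, ignore_index=255,
# pred_label=[0, 1, 1] and label=[0, 0, 1], the matching predictions are [0, 1]:
#   area_intersect = [1, 1], area_pred_label = [1, 2], area_label = [2, 1]
#   area_union = area_pred_label + area_label - area_intersect = [2, 2]
# so the per-category IoU is [0.5, 0.5].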
def total_intersect_and_union ( results , gt_seg_maps , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou ( results , gt_seg_maps , num_labels , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou )
    metrics['mean_accuracy'] = np.nanmean(acc )
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> Tuple:
lowercase_ : Optional[int] = mean_iou(
results=_lowercase , gt_seg_maps=_lowercase , num_labels=_lowercase , ignore_index=_lowercase , nan_to_num=_lowercase , label_map=_lowercase , reduce_labels=_lowercase , )
return iou_result
| 7 | 1 |
'''simple docstring'''
import os
from distutils.util import strtobool
def _UpperCAmelCase ( env_keys , default ):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def _UpperCAmelCase ( key , default=False ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def _UpperCAmelCase ( key , default="no" ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return value
| 7 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , _lowercase=16 , **_lowercase , ) -> List[str]:
super().__init__(**_lowercase )
lowercase_ : Optional[int] = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_act
lowercase_ : Any = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Any = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : str = qkv_bias
lowercase_ : List[str] = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
return 1E-4
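# Note (added): the ONNX export config above declares a single `pixel_values`
# input with dynamic batch/channel/height/width axes, and 1e-4 is the absolute
# tolerance used when validating exported outputs against the PyTorch model.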
| 7 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Dict = 'tokenizer_file'
SCREAMING_SNAKE_CASE_ : List[str] = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def lowerCamelCase__ ( self ) -> List[Any]:
super().setUp()
lowercase_ : int = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self , **_lowercase ) -> Any:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[int] = self.get_rust_tokenizer()
lowercase_ : Any = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
lowercase_ : Dict = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
lowercase_ : List[Any] = tokenizer.batch_encode_plus(_lowercase )['input_ids']
self.assertListEqual(_lowercase , _lowercase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=6 ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ : int = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase_ : Dict = 'This is a simple input'
lowercase_ : List[str] = ['This is a simple input 1', 'This is a simple input 2']
lowercase_ : List[Any] = ('This is a simple input', 'This is a pair')
lowercase_ : List[str] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_lowercase , max_length=_lowercase )
tokenizer_r.encode_plus(_lowercase , max_length=_lowercase )
tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
tokenizer_r.encode(_lowercase , max_length=_lowercase )
tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
lowercase_ : List[Any] = None # Hotfixing padding = None
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='max_length' )
# Simple input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='max_length' )
# Simple input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='max_length' , )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='max_length' )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='max_length' )
# Pair input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='max_length' , )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : List[str] = self.get_rust_tokenizer()
lowercase_ : Dict = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_lowercase )
lowercase_ : Optional[Any] = next(iter(_lowercase ) )['premise'] # pick up one data
lowercase_ : Any = list(sample_data.values() )
lowercase_ : int = list(map(tokenizer.encode , _lowercase ) )
lowercase_ : Optional[Any] = [tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase ) for x in output_tokens]
self.assertListEqual(_lowercase , _lowercase )
def lowerCamelCase__ ( self ) -> int:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 7 | 1 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCAmelCase ( num : float ) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError('math domain error' )
    # integrate x**(num - 1) * exp(-x) from 0 to infinity
    return quad(_integrand , 0 , inf , args=(num,) )[0]
def _integrand ( x : float , z : float ) -> float:
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
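# Sanity sketch (added): Gamma(5) = 4! = 24, up to quadrature error.
def _gamma_sanity_check() -> bool:
    return math.isclose(_UpperCAmelCase(5.0 ) , 24.0 , rel_tol=1E-6 )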
if __name__ == "__main__":
from doctest import testmod
testmod()
| 7 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
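# Note (added): the pattern exercised above is that `is_safetensors_compatible`
# returns True only when every PyTorch `.bin` weight in the file list has a
# matching `.safetensors` counterpart (per variant), so removing a single
# `.safetensors` file flips the expected result to False.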
| 7 | 1 |
'''simple docstring'''
def add ( first : int , second : int ) -> int:
    """simple docstring"""
    while second != 0:
        # `carry` holds the common set bits, which must be added one position higher
        carry = first & second
        first ^= second
        second = carry << 1
    return first
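# Worked trace (added): add(5, 3)
#   iter 1: carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   iter 2: carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   iter 3: carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   iter 4: carry = 0, first = 0 ^ 8 = 8, second = 0  ->  returns 8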
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def update_custom_js ( version ):
    """simple docstring"""
    with open(A , encoding='utf-8' , newline='\n' ) as f:  # A is the custom.js path defined above
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
    with open(A , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
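# Worked example (added): given a custom.js containing
#   const stableVersion = "v4.27.0"
#   const versionMapping = {
#       "": "v4.27.0 (stable)",
#   }
# calling update_custom_js("4.28.0") rewrites the first line to
#   const stableVersion = "v4.28.0"
# and appends a '"v4.28.0": "v4.28.0",' entry to the line before the closing '}'.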
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
| 7 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A: int = logging.get_logger(__name__)
A: Optional[int] = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 'trajectory_transformer'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=100 , _lowercase=5 , _lowercase=1 , _lowercase=1 , _lowercase=249 , _lowercase=6 , _lowercase=17 , _lowercase=25 , _lowercase=4 , _lowercase=4 , _lowercase=128 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.00_06 , _lowercase=512 , _lowercase=0.02 , _lowercase=1E-1_2 , _lowercase=1 , _lowercase=True , _lowercase=1 , _lowercase=5_0256 , _lowercase=5_0256 , **_lowercase , ) -> Tuple:
lowercase_ : Optional[Any] = vocab_size
lowercase_ : Union[str, Any] = action_weight
lowercase_ : Any = reward_weight
lowercase_ : str = value_weight
lowercase_ : List[str] = max_position_embeddings
lowercase_ : Dict = block_size
lowercase_ : Union[str, Any] = action_dim
lowercase_ : Tuple = observation_dim
lowercase_ : Any = transition_dim
lowercase_ : Optional[int] = learning_rate
lowercase_ : Optional[int] = n_layer
lowercase_ : Tuple = n_head
lowercase_ : int = n_embd
lowercase_ : List[str] = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : Tuple = resid_pdrop
lowercase_ : List[str] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : int = kaiming_initializer_range
lowercase_ : int = use_cache
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 7 |
'''simple docstring'''
def get_data ( source_data : list[list[float]] ) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score ( data_lists : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score: list[float] = []
        # for weight 0, the score is 1 - the normalized value (lower raw value is better)
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores ( score_lists : list[list[float]] ) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def _UpperCAmelCase ( source_data : list[list[float]] , weights : list[int] ) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
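# Usage sketch (added): three candidates scored on (price, storage, year), with
# weight 0 for price (lower is better) and weight 1 for the rest (higher is
# better); a combined score is appended to each row and the second row wins.
if __name__ == "__main__":
    print(_UpperCAmelCase([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]] , [0, 1, 1] ))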
| 7 | 1 |
'''simple docstring'''
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa : Pointad , end_pointb : Pointad ) -> Vectorad:
    """simple docstring"""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad , ac : Vectorad ) -> Vectorad:
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad , accuracy : int ) -> bool:
    """simple docstring"""
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def _UpperCAmelCase ( a : Pointad , b : Pointad , c : Pointad , accuracy : int = 10 ) -> bool:
    """simple docstring"""
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
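# Usage sketch (added): (0, 0, 0), (1, 1, 1) and (2, 2, 2) lie on one line, so
# the cross product of AB and AC is the zero vector and the check returns True.
if __name__ == "__main__":
    print(_UpperCAmelCase((0.0, 0.0, 0.0) , (1.0, 1.0, 1.0) , (2.0, 2.0, 2.0) ))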
| 7 |
'''simple docstring'''
def add ( first : int , second : int ) -> int:
    """simple docstring"""
    while second != 0:
        # `carry` holds the common set bits, which must be added one position higher
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 1 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def _UpperCAmelCase ( a : int , a : int , a : float = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(a )
lowercase_ : List[str] = cos(a )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : Tuple = (1 - _cos) / 2
lowercase_ : Any = 1 - _cos
lowercase_ : List[Any] = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[str] = 1 - alpha
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCAmelCase ( a : int , a : int , a : float = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(a )
lowercase_ : Dict = cos(a )
lowercase_ : List[str] = _sin / (2 * q_factor)
lowercase_ : Any = (1 + _cos) / 2
lowercase_ : Any = -1 - _cos
lowercase_ : int = 1 + alpha
lowercase_ : Dict = -2 * _cos
lowercase_ : Dict = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCAmelCase ( a : int , a : int , a : float = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
lowercase_ : Optional[Any] = tau * frequency / samplerate
lowercase_ : List[Any] = sin(a )
lowercase_ : Any = cos(a )
lowercase_ : List[str] = _sin / (2 * q_factor)
lowercase_ : List[Any] = _sin / 2
lowercase_ : Optional[int] = 0
lowercase_ : Dict = -ba
lowercase_ : Optional[Any] = 1 + alpha
lowercase_ : Dict = -2 * _cos
lowercase_ : str = 1 - alpha
lowercase_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCAmelCase ( a : int , a : int , a : float = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(a )
lowercase_ : Union[str, Any] = cos(a )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : int = 1 - alpha
lowercase_ : Dict = -2 * _cos
lowercase_ : List[Any] = 1 + alpha
lowercase_ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def _UpperCAmelCase ( a : int , a : int , a : float , a : float = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Optional[Any] = sin(a )
lowercase_ : Any = cos(a )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : List[str] = 1_0 ** (gain_db / 4_0)
lowercase_ : List[Any] = 1 + alpha * big_a
lowercase_ : Tuple = -2 * _cos
lowercase_ : Any = 1 - alpha * big_a
lowercase_ : Dict = 1 + alpha / big_a
lowercase_ : Optional[int] = -2 * _cos
lowercase_ : Optional[int] = 1 - alpha / big_a
lowercase_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCAmelCase ( a : int , a : int , a : float , a : float = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
lowercase_ : Tuple = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(a )
lowercase_ : Any = cos(a )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : str = 1_0 ** (gain_db / 4_0)
lowercase_ : str = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Any = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : List[str] = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Any = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : List[str] = 2 * sqrt(a ) * alpha
lowercase_ : Optional[Any] = big_a * (pmc + aaa)
lowercase_ : Dict = 2 * big_a * mpc
lowercase_ : List[str] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : List[str] = -2 * pmpc
lowercase_ : List[Any] = ppmc - aaa
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCAmelCase ( a : int , a : int , a : float , a : float = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
lowercase_ : str = tau * frequency / samplerate
lowercase_ : str = sin(a )
lowercase_ : Tuple = cos(a )
lowercase_ : Tuple = _sin / (2 * q_factor)
lowercase_ : int = 1_0 ** (gain_db / 4_0)
lowercase_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : str = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Tuple = 2 * sqrt(a ) * alpha
lowercase_ : List[str] = big_a * (ppmc + aaa)
lowercase_ : Optional[Any] = -2 * big_a * pmpc
lowercase_ : Union[str, Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[int] = pmc + aaa
lowercase_ : Any = 2 * mpc
lowercase_ : Optional[int] = pmc - aaa
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
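
# Usage sketch, assuming the factories above keep their original audio_filters
# names (make_lowpass, make_highpass, make_peak, ...):
#
#   from audio_filters.butterworth_filter import make_lowpass
#   lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
#   filtered = [lowpass.process(sample) for sample in samples]
#
# The coefficient formulas follow the RBJ Audio EQ Cookbook: q_factor defaults
# to 1/sqrt(2) (a Butterworth response) and gain_db applies only to the peak
# and shelving variants.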
| 7 |
'''simple docstring'''
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = n
lowercase_ : Dict = [None] * self.n
lowercase_ : Tuple = 0 # index of the first element
lowercase_ : List[Any] = 0
lowercase_ : List[Any] = 0
def __len__( self ) -> int:
return self.size
def lowerCamelCase__ ( self ) -> bool:
return self.size == 0
def lowerCamelCase__ ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def lowerCamelCase__ ( self , _lowercase ) -> Any:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
lowercase_ : Tuple = data
lowercase_ : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCamelCase__ ( self ) -> Any:
if self.size == 0:
raise Exception('UNDERFLOW' )
lowercase_ : Dict = self.array[self.front]
lowercase_ : Tuple = None
lowercase_ : int = (self.front + 1) % self.n
self.size -= 1
return temp
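
# Usage sketch, assuming the original CircularQueue method names (enqueue,
# dequeue, first) behind the obfuscated identifiers above:
#
#   queue = CircularQueue(3)          # fixed capacity, ring-buffer backed
#   queue.enqueue(10).enqueue(20)     # enqueue returns self, so calls chain
#   queue.first()                     # -> 10
#   queue.dequeue()                   # -> 10; front/rear wrap modulo n
#   len(queue)                        # -> 1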
| 7 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 7 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
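
# e.g. with the default scale_factor of 8: height=768 gives 768 // 64 = 12
# latent rows (no remainder) and maps back to 12 * 8 = 96, while height=770
# rounds the latent grid up to 13 and maps back to 104.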
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> list:
"""simple docstring"""
lowercase_ : List[Any] = [0] * len(a )
for i in range(1 , len(a ) ):
# use last results for better performance - dynamic programming
lowercase_ : Dict = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase_ : Any = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase_ : Any = j
return prefix_result
def _UpperCAmelCase ( a : str ) -> int:
"""simple docstring"""
return max(prefix_function(a ) )
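
# Quick check (prefix_function / longest_prefix are the original names behind
# the obfuscated identifiers above):
#
#   prefix_function("aabaaab")  # -> [0, 1, 0, 1, 2, 2, 3]
#   longest_prefix("aabaaab")   # -> 3, for the border "aab"
#
# prefix_result[i] is the length of the longest proper prefix of s[:i + 1]
# that is also its suffix -- the failure table of the Knuth-Morris-Pratt matcher.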
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
A: List[Any] = 1_0
def _UpperCAmelCase ( a : int , a : int , a : list[int] , a : int ) -> int:
"""simple docstring"""
for i in range(a , a ):
if array[i] == target:
return i
return -1
def _UpperCAmelCase ( a : list[int] , a : int ) -> int:
"""simple docstring"""
lowercase_ : List[str] = 0
lowercase_ : Any = len(a )
while left <= right:
if right - left < precision:
return lin_search(a , a , a , a )
lowercase_ : Optional[Any] = (left + right) // 3 + 1
lowercase_ : str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowercase_ : List[str] = one_third - 1
elif array[two_third] < target:
lowercase_ : Optional[int] = two_third + 1
else:
lowercase_ : Optional[int] = one_third + 1
lowercase_ : Any = two_third - 1
else:
return -1
def _UpperCAmelCase ( a : int , a : int , a : list[int] , a : int ) -> int:
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(a , a , a , a )
lowercase_ : Tuple = (left + right) // 3 + 1
lowercase_ : str = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(a , one_third - 1 , a , a )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , a , a , a )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , a , a )
else:
return -1
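
# Partition arithmetic for left=0, right=29: one_third = 29 // 3 + 1 = 10 and
# two_third = 2 * 29 // 3 + 1 = 20, so each comparison discards roughly a third
# of the range; once the window is narrower than `precision` (10), both helpers
# fall back to the linear scan above.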
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by comma:\n").strip()
A: Optional[Any] = [int(item.strip()) for item in user_input.split(",")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
A: Dict = int(input("Enter the number to be found in the list:\n").strip())
A: int = ite_ternary_search(collection, target)
A: List[str] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("Not found")
| 7 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
        # note: this test does not measure peak performance since it only evaluates a small batch,
        # but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
| 7 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Tuple = logging.get_logger(__name__)
A: List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'nllb-moe'
SCREAMING_SNAKE_CASE_ : Optional[int] = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _lowercase=12_8112 , _lowercase=1024 , _lowercase=12 , _lowercase=4096 , _lowercase=16 , _lowercase=12 , _lowercase=4096 , _lowercase=16 , _lowercase=0.05 , _lowercase=0.05 , _lowercase=True , _lowercase=True , _lowercase="relu" , _lowercase=1024 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=2 , _lowercase=True , _lowercase=False , _lowercase="float32" , _lowercase=False , _lowercase=128 , _lowercase=64 , _lowercase=4 , _lowercase=4 , _lowercase=0.0_01 , _lowercase=0.0_01 , _lowercase="all" , _lowercase=False , _lowercase=False , _lowercase=1.0 , _lowercase=0.2 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=False , **_lowercase , ) -> Optional[int]:
lowercase_ : List[str] = vocab_size
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : Optional[int] = d_model
lowercase_ : Dict = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Union[str, Any] = encoder_attention_heads
lowercase_ : Any = decoder_ffn_dim
lowercase_ : str = decoder_layers
lowercase_ : Optional[Any] = decoder_attention_heads
lowercase_ : Optional[Any] = dropout
lowercase_ : Any = attention_dropout
lowercase_ : Any = activation_dropout
lowercase_ : List[Any] = activation_function
lowercase_ : List[str] = init_std
lowercase_ : Dict = encoder_layerdrop
lowercase_ : Dict = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Optional[int] = encoder_layers
lowercase_ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : List[Any] = router_z_loss_coef
lowercase_ : Union[str, Any] = router_aux_loss_coef
lowercase_ : List[str] = decoder_sparse_step
lowercase_ : Any = encoder_sparse_step
lowercase_ : List[str] = num_experts
lowercase_ : Optional[Any] = expert_capacity
lowercase_ : Tuple = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
lowercase_ : Optional[Any] = router_dtype
lowercase_ : List[Any] = router_ignore_padding_tokens
lowercase_ : Dict = batch_prioritized_routing
lowercase_ : Optional[int] = second_expert_policy
lowercase_ : Any = normalize_router_prob_before_dropping
lowercase_ : int = moe_eval_capacity_token_fraction
lowercase_ : Union[str, Any] = moe_token_dropout
lowercase_ : int = output_router_logits
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
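
# Instantiation sketch (in the public transformers API this configuration is
# NllbMoeConfig with model_type "nllb-moe"):
#
#   from transformers import NllbMoeConfig
#   config = NllbMoeConfig(num_experts=8, expert_capacity=32)
#   assert config.d_model == 1024 and config.router_dtype == "float32"
#
# Any router_dtype outside float32/float16/bfloat16 raises the ValueError
# guarded in __init__ above.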
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
A: Optional[int] = "sshleifer/mar_enro_6_3_student"
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[int]:
super().setUp()
lowercase_ : List[Any] = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=_lowercase , )
lowercase_ : Optional[Any] = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def lowerCamelCase__ ( self ) -> Tuple:
MarianMTModel.from_pretrained(_lowercase )
@slow
@require_torch_gpu
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : Union[str, Any] = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
lowercase_ : int = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
lowercase_ : str = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
lowercase_ : Tuple = bash_script.replace(_lowercase , str(_lowercase ) )
lowercase_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase_ : Any = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase_ : Union[str, Any] = ['finetune.py'] + bash_script.split() + args
with patch.object(_lowercase , 'argv' , _lowercase ):
lowercase_ : str = argparse.ArgumentParser()
lowercase_ : Tuple = pl.Trainer.add_argparse_args(_lowercase )
lowercase_ : List[str] = SummarizationModule.add_model_specific_args(_lowercase , os.getcwd() )
lowercase_ : Optional[Any] = parser.parse_args()
lowercase_ : Tuple = main(_lowercase )
# Check metrics
lowercase_ : Dict = load_json(model.metrics_save_path )
lowercase_ : Tuple = metrics['val'][0]
lowercase_ : Tuple = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _lowercase )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase_ : Optional[Any] = os.listdir(_lowercase )
lowercase_ : Union[str, Any] = [x for x in contents if x.endswith('.ckpt' )][0]
lowercase_ : Any = os.path.join(args.output_dir , _lowercase )
lowercase_ : List[Any] = torch.load(_lowercase , map_location='cpu' )
lowercase_ : Dict = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase_ : int = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def lowerCamelCase__ ( self ) -> str:
lowercase_ : str = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
lowercase_ : List[Any] = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
lowercase_ : List[Any] = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
lowercase_ : Optional[int] = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
lowercase_ : int = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
lowercase_ : int = bash_script.replace(_lowercase , str(_lowercase ) )
lowercase_ : Dict = self.get_auto_remove_tmp_dir()
lowercase_ : Optional[Any] = bash_script.replace('--fp16' , '' )
lowercase_ : Optional[Any] = 6
lowercase_ : str = (
['distillation.py']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'--gpus=1',
'--learning_rate=1e-3',
f"--num_train_epochs={epochs}",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(_lowercase , 'argv' , _lowercase ):
lowercase_ : Optional[int] = argparse.ArgumentParser()
lowercase_ : List[Any] = pl.Trainer.add_argparse_args(_lowercase )
lowercase_ : int = SummarizationDistiller.add_model_specific_args(_lowercase , os.getcwd() )
lowercase_ : Tuple = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase_ : Optional[int] = distill_main(_lowercase )
# Check metrics
lowercase_ : Any = load_json(model.metrics_save_path )
lowercase_ : Tuple = metrics['val'][0]
lowercase_ : Optional[Any] = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _lowercase )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase_ : List[Any] = os.listdir(_lowercase )
lowercase_ : List[str] = [x for x in contents if x.endswith('.ckpt' )][0]
lowercase_ : int = os.path.join(args.output_dir , _lowercase )
lowercase_ : str = torch.load(_lowercase , map_location='cpu' )
lowercase_ : Any = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase_ : int = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    lowercase_ : Dict = ''
    lowercase_ : Any = ''
    # append each character followed by "|" to new_input_string for range(0, length - 1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # l and r store the start and end of the furthest-reaching palindromic
    # substring found so far
lowercase_ , lowercase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase_ : List[Any] = [1 for i in range(len(a ) )]
# for each character in new_string find corresponding palindromic string
lowercase_ : Dict = 0
for j in range(len(a ) ):
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
        # does this palindrome end beyond the previously explored right boundary r?
        # if yes, update l and r to span this palindrome
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
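
# Example (the original module exposes this as palindromic_string):
#
#   palindromic_string("abbbaba")           # -> "abbba"
#   palindromic_string("forgeeksskeegfor")  # -> "geeksskeeg"
#
# The "|" separators make every palindrome odd-length in the transformed
# string, so even- and odd-length palindromes are found in a single O(n) pass.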
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A: Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Tuple = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[int] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 7 | 1 |
'''simple docstring'''
import requests
A: Tuple = "YOUR API KEY"
def _UpperCAmelCase ( a : str , a : str = giphy_api_key ) -> list:
"""simple docstring"""
lowercase_ : Dict = '+'.join(query.split() )
lowercase_ : str = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
lowercase_ : str = requests.get(a ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 7 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
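
# Example invocation (a sketch; the flags come from the argparse setup above):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm
#
# A URL containing "vqa", "nlvr" or "irtr" selects the matching head; anything
# else must contain "mlm_itm" or the script raises ValueError("Unknown model type").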
| 7 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE_ : str = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : str = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE_ : Tuple = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def lowerCamelCase__ ( self ) -> List[str]:
torch.manual_seed(0 )
lowercase_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
lowercase_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
lowercase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
lowercase_ : Dict = CLIPTextModel(_lowercase )
lowercase_ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase_ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowerCamelCase__ ( self , _lowercase , _lowercase=0 ) -> int:
if str(_lowercase ).startswith('mps' ):
lowercase_ : Tuple = torch.manual_seed(_lowercase )
else:
lowercase_ : List[str] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase_ : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ : List[str] = self.get_dummy_components()
lowercase_ : int = TextToVideoSDPipeline(**_lowercase )
lowercase_ : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
lowercase_ : Optional[Any] = self.get_dummy_inputs(_lowercase )
lowercase_ : Any = 'np'
lowercase_ : Union[str, Any] = sd_pipe(**_lowercase ).frames
lowercase_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowercase_ : List[str] = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ) -> Optional[int]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase__ ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def lowerCamelCase__ ( self ) -> Optional[int]:
pass
def lowerCamelCase__ ( self ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase ):
    """simple docstring"""
    def test_full_model(self ):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to('cuda' )
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type='pt' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model(self ):
        expected_video = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
        pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
        pipe = pipe.to('cuda' )
        prompt = 'Spiderman is surfing'
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type='pt' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
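# Hedged usage sketch (illustrative, not part of the test suite; the checkpoint
# name is taken from the slow tests above):
# import torch
# from diffusers import TextToVideoSDPipeline
# pipe = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b').to('cuda')
# frames = pipe('Spiderman is surfing', num_inference_steps=25, output_type='np').frames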
| 7 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['input_features', 'attention_mask']
    def __init__(self , feature_size=80 , sampling_rate=16000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=32768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Framing parameters: `win_length`/`hop_length` are given in milliseconds,
        # so convert them to samples before choosing the FFT size.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self , one_waveform ) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel='log' , )
        return msfc_features.T
    def _normalize_one(self , x , input_length , padding_value ):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize(self , input_features , attention_mask=None ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__(self , raw_speech , padding=False , max_length=None , truncation=False , pad_to_multiple_of=None , return_attention_mask=None , return_tensors=None , sampling_rate=None , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , list ):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
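# Hedged usage sketch (illustrative; the MCTCT class name is an assumption, the
# constructor arguments match the definition above):
# import numpy as np
# extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16000)
# feats = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors='np')
# feats['input_features'].shape  # (1, num_frames, 80)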
| 7 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['transformers', 'torch', 'note_seq']
    def __init__(self , *args , **kwargs ):
        requires_backends(self , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
'''simple docstring'''
def jaro_winkler(stra: str , strb: str ) -> float:
    """simple docstring"""
    def get_matched_characters(_stra: str , _strb: str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
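    # Additional hedged examples (values computed from the definition above):
    print(jaro_winkler("hello", "hello"))  # 1.0 for identical strings
    print(jaro_winkler("martha", "marhta"))  # ~0.9611 (one transposition, shared "mar" prefix)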
| 7 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
A: Optional[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens(self , token_ids_a , token_ids_b=None ) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences(self , token_ids_a , token_ids_b=None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs(self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self , src_texts , src_lang="en_XX" , tgt_texts=None , tgt_lang="ro_RO" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode(self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode(self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens(self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary(self , save_directory , filename_prefix=None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
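# Hedged usage sketch (illustrative; mirrors the tokenizer API defined above):
# from transformers import MBartTokenizerFast
# tok = MBartTokenizerFast.from_pretrained('facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO')
# batch = tok('UN Chief Says There Is No Military Solution in Syria', return_tensors='pt')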
| 7 |
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90(matrix: list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row(matrix: list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]] ) -> None:
    """simple docstring"""
    for row in matrix:
        print(*row )
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
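    # Hedged note (illustrative): with the fixed names above the rotations compose,
    # e.g. rotate_90(rotate_90(make_matrix())) yields the same grid as rotate_180(make_matrix()).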
| 7 | 1 |
'''simple docstring'''
import random
def partition(a , left_index , right_index ):
    """simple docstring"""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a , left , right ):
    """simple docstring"""
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main():
    """simple docstring"""
    user_input = input('Enter numbers separated by a comma:\n' ).strip()
    arr = [int(item ) for item in user_input.split(',' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
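    # Hedged sanity check (illustrative, runs after the interactive demo above):
    demo = [3, 1, 4, 1, 5, 9, 2, 6]
    quick_sort_random(demo, 0, len(demo))
    assert demo == [1, 1, 2, 3, 4, 5, 6, 9]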
| 7 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ):
    """simple docstring"""
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, oder?',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
        'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
        'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
        'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"Generating {path}" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    """simple docstring"""
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook(self , m , inputs , outputs ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__(self , x ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [h.remove() for h in self.handles]
        return self
    @property
    def parametrized(self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    """simple docstring"""
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    def __call__(self , x ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(src_traced ) != len(dest_traced ):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced )} operations while"
                f" destination module has {len(dest_traced )}." )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}" )
def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    """simple docstring"""
    print(f"Converting {name}..." )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet' ) )}"
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
        print(f"Pushed {checkpoint_name}" )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
    }
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
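    # Hedged CLI sketch (illustrative; the script file name is an assumption, the
    # flags match the argparse definitions above):
    #   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./resnet50-dump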
| 7 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str ):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params ):
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs' )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'] )
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'] )
        params.global_rank = int(os.environ['RANK'] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'] )
        assert params.node_id == int(os.environ['NODE_RANK'] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed(args ):
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
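# Hedged note (not in the original file): call the seeding helper above before
# building the model, so NumPy, the torch CPU generator and, when n_gpu > 0,
# all CUDA generators start from the same seed and runs stay reproducible.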
| 7 | 1 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
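    # Hedged examples (values follow directly from PV = nRT with the constant above):
    print(pressure_of_gas_system(2, 100, 5))  # ~332.57848
    print(volume_of_gas_system(0.5, 273, 0.004))  # ~283731.01575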
| 7 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys , default ):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key , default=False ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key , default="no" ):
    """simple docstring"""
    value = os.environ.get(key , str(default ) )
    return value
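# Hedged usage sketch (illustrative; the environment variable names are examples):
# os.environ['WORLD_SIZE'] = '4'
# get_int_from_env(['WORLD_SIZE', 'SLURM_NTASKS'], default=1)  # -> 4
# parse_flag_from_env('MY_DEBUG_FLAG', default=False)          # -> False unless the variable is set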
| 7 | 1 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str ) -> str:
    """simple docstring"""
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str] , curr_string: str , index: int , last_match_id: str ) -> None:
    """simple docstring"""
    lexicon.pop(curr_string )
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index )[2:]
def compress_data(data_bits: str ) -> str:
    """simple docstring"""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str , compressed: str ) -> str:
    """simple docstring"""
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str , to_write: str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def compress(source_path: str , destination_path: str ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
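    # Hedged usage sketch (illustrative; the file names are assumptions):
    #   python lempel_ziv.py sample.txt sample.lz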
| 7 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label , label , num_labels , ignore_index: bool , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results , gt_seg_maps , num_labels , ignore_index: bool , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results , gt_seg_maps , num_labels , ignore_index: bool , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou )
    metrics['mean_accuracy'] = np.nanmean(acc )
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric ):
    """simple docstring"""
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute(self , predictions , references , num_labels , ignore_index , nan_to_num=None , label_map=None , reduce_labels=False , ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
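# Hedged usage sketch (illustrative; mirrors the docstring example above):
# import numpy as np
# import datasets
# metric = datasets.load_metric('mean_iou')
# out = metric.compute(predictions=[np.array([[2, 2], [3, 4]])],
#                      references=[np.array([[1, 2], [3, 4]])],
#                      num_labels=10, ignore_index=255, reduce_labels=False)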
| 7 | 1 |
| 7 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = 'vit'
    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-4
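# Hedged usage sketch (illustrative; uses the config class defined above):
# config = ViTConfig(image_size=384, patch_size=32)
# config.num_hidden_layers  # -> 12 by default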
| 7 | 1 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student , args ):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student , args ):
    """simple docstring"""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Training' )
    parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
    parser.add_argument(
        '--dump_path' , type=str , required=True , help='The output directory (log, checkpoints, parameters, etc.)' )
    parser.add_argument(
        '--data_file' , type=str , required=True , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
    parser.add_argument(
        '--student_type' , type=str , choices=['distilbert', 'roberta', 'gpt2'] , required=True , help='The student type (DistilBERT, RoBERTa).' , )
    parser.add_argument('--student_config' , type=str , required=True , help='Path to the student configuration.' )
    parser.add_argument(
        '--student_pretrained_weights' , default=None , type=str , help='Load student initialization checkpoint.' )
    parser.add_argument(
        '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=True , help='Teacher type (BERT, RoBERTa).' )
    parser.add_argument('--teacher_name' , type=str , required=True , help='The teacher model.' )
    parser.add_argument('--temperature' , default=2.0 , type=float , help='Temperature for the softmax temperature.' )
    parser.add_argument(
        '--alpha_ce' , default=0.5 , type=float , help='Linear weight for the distillation loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_mlm' , default=0.0 , type=float , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
    parser.add_argument('--alpha_clm' , default=0.5 , type=float , help='Linear weight for the CLM loss. Must be >=0.' )
    parser.add_argument('--alpha_mse' , default=0.0 , type=float , help='Linear weight of the MSE loss. Must be >=0.' )
    parser.add_argument(
        '--alpha_cos' , default=0.0 , type=float , help='Linear weight of the cosine embedding loss. Must be >=0.' )
    parser.add_argument(
        '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
    parser.add_argument(
        '--mlm_mask_prop' , default=0.15 , type=float , help='Proportion of tokens for which we need to make a prediction.' , )
    parser.add_argument('--word_mask' , default=0.8 , type=float , help='Proportion of tokens to mask out.' )
    parser.add_argument('--word_keep' , default=0.1 , type=float , help='Proportion of tokens to keep.' )
    parser.add_argument('--word_rand' , default=0.1 , type=float , help='Proportion of tokens to randomly replace.' )
    parser.add_argument(
        '--mlm_smoothing' , default=0.7 , type=float , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
    parser.add_argument('--token_counts' , type=str , help='The token counts in the data_file for MLM.' )
    parser.add_argument(
        '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
    parser.add_argument(
        '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
    parser.add_argument(
        '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
    parser.add_argument('--n_epoch' , type=int , default=3 , help='Number of pass on the whole dataset.' )
    parser.add_argument('--batch_size' , type=int , default=5 , help='Batch size (for each process).' )
    parser.add_argument(
        '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=50 , help='Gradient accumulation for larger training batches.' , )
    parser.add_argument('--warmup_prop' , default=0.05 , type=float , help='Linear warmup proportion.' )
    parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
    parser.add_argument('--learning_rate' , default=5e-4 , type=float , help='The initial learning rate for Adam.' )
    parser.add_argument('--adam_epsilon' , default=1e-6 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , default=5.0 , type=float , help='Max gradient norm.' )
    parser.add_argument('--initializer_range' , default=0.02 , type=float , help='Random initialization range.' )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O1' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_gpu' , type=int , default=1 , help='Number of GPUs in the node.' )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='Distributed training - Local rank' )
    parser.add_argument('--seed' , type=int , default=56 , help='Random seed' )
    parser.add_argument('--log_interval' , type=int , default=500 , help='Tensorboard logging interval.' )
    parser.add_argument('--checkpoint_interval' , type=int , default=4000 , help='Checkpoint interval.' )
    args = parser.parse_args()
    sanity_checks(args )
# ARGS #
init_gpu_params(a )
set_seed(a )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
' itUse `--force` if you want to overwrite it' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(f"Param: {args}" )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(a ) , a , indent=4 )
git_log(args.dump_path )
lowercase_ , lowercase_ , lowercase_ : Any = MODEL_CLASSES[args.student_type]
lowercase_ , lowercase_ , lowercase_ : Optional[int] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowercase_ : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowercase_ : str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowercase_ : str = tokenizer.all_special_tokens.index(a )
lowercase_ : str = tokenizer.all_special_ids[idx]
logger.info(f"Special tokens {special_tok_ids}" )
lowercase_ : Union[str, Any] = special_tok_ids
lowercase_ : Optional[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"Loading data from {args.data_file}" )
with open(args.data_file , 'rb' ) as fp:
lowercase_ : Union[str, Any] = pickle.load(a )
if args.mlm:
logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , 'rb' ) as fp:
lowercase_ : Optional[int] = pickle.load(a )
lowercase_ : List[Any] = np.maximum(a , 1 ) ** -args.mlm_smoothing
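        # smaller counts yield larger weights (count ** -mlm_smoothing, 0.7 by
        # default), emphasizing rarer tokens as in XLM / word2vec subsampling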
for idx in special_tok_ids.values():
lowercase_ : Dict = 0.0 # do not predict special tokens
lowercase_ : List[str] = torch.from_numpy(a )
else:
lowercase_ : int = None
lowercase_ : Optional[int] = LmSeqsDataset(params=a , data=a )
logger.info('Data loader created.' )
# STUDENT #
logger.info(f"Loading student config from {args.student_config}" )
lowercase_ : str = student_config_class.from_pretrained(args.student_config )
lowercase_ : Optional[int] = True
if args.student_pretrained_weights is not None:
logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
lowercase_ : Any = student_model_class.from_pretrained(args.student_pretrained_weights , config=a )
else:
lowercase_ : List[Any] = student_model_class(a )
if args.n_gpu > 0:
student.to(f"cuda:{args.local_rank}" )
logger.info('Student loaded.' )
# TEACHER #
lowercase_ : Tuple = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=a )
if args.n_gpu > 0:
teacher.to(f"cuda:{args.local_rank}" )
logger.info(f"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(a , a )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(a , a )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowercase_ : Optional[Any] = Distiller(
params=a , dataset=a , token_probs=a , student=a , teacher=a )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
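    # Through PretrainedConfig's attribute_map above, reads and writes of e.g.
    # config.hidden_size are redirected to config.n_embd, so callers may use
    # either the generic or the GPT-style attribute names.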
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 7 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCAmelCase ( a : list , a : int ) -> Optional[Any]:
"""simple docstring"""
# Checks if the entire collection has been sorted
if len(a ) <= 1 or n <= 1:
return
insert_next(a , n - 1 )
rec_insertion_sort(a , n - 1 )
def _UpperCAmelCase ( a : list , a : int ) -> Dict:
"""simple docstring"""
# Checks order between adjacent elements
if index >= len(a ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
lowercase_ , lowercase_ : Tuple = (
collection[index],
collection[index - 1],
)
insert_next(a , index + 1 )
if __name__ == "__main__":
A: str = input("Enter integers separated by spaces: ")
A: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 7 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
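# As the cases below exercise, is_safetensors_compatible is expected to return
# True only when every torch (.bin) weight file has a .safetensors counterpart,
# optionally carrying a variant suffix such as ".fp16".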
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
| 7 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowercase=5_0257 , _lowercase=1024 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=None , _lowercase="gelu_pytorch_tanh" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=5_0256 , _lowercase=5_0256 , _lowercase=True , _lowercase=True , _lowercase=True , **_lowercase , ) -> Any:
lowercase_ : Tuple = vocab_size
lowercase_ : str = n_positions
lowercase_ : List[str] = n_embd
lowercase_ : str = n_layer
lowercase_ : Optional[Any] = n_head
lowercase_ : Optional[int] = n_inner
lowercase_ : Union[str, Any] = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : str = embd_pdrop
lowercase_ : Optional[Any] = attn_pdrop
lowercase_ : List[Any] = layer_norm_epsilon
lowercase_ : Optional[int] = initializer_range
lowercase_ : List[Any] = scale_attn_weights
lowercase_ : Any = use_cache
lowercase_ : List[str] = attention_softmax_in_fpaa
lowercase_ : Any = scale_attention_softmax_in_fpaa
lowercase_ : Optional[Any] = multi_query
lowercase_ : Optional[Any] = bos_token_id
lowercase_ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
| 7 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
A: str = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
A: List[str] = parser.parse_args()
update_custom_js(args.version)
| 7 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A: Union[str, Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : str ) -> YolosConfig:
"""simple docstring"""
lowercase_ : Optional[int] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase_ : Any = 1_9_2
lowercase_ : Tuple = 7_6_8
lowercase_ : Dict = 1_2
lowercase_ : Optional[Any] = 3
lowercase_ : Any = [8_0_0, 1_3_3_3]
lowercase_ : Optional[int] = False
elif yolos_name == "yolos_s_dWr":
lowercase_ : Any = 3_3_0
lowercase_ : int = 1_4
lowercase_ : Dict = 6
lowercase_ : Optional[int] = 1_3_2_0
elif "yolos_s" in yolos_name:
lowercase_ : List[str] = 3_8_4
lowercase_ : List[Any] = 1_5_3_6
lowercase_ : Dict = 1_2
lowercase_ : Any = 6
elif "yolos_b" in yolos_name:
lowercase_ : Any = [8_0_0, 1_3_4_4]
lowercase_ : Optional[Any] = 9_1
lowercase_ : Optional[int] = 'huggingface/label-files'
lowercase_ : Tuple = 'coco-detection-id2label.json'
lowercase_ : Dict = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : List[str] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : Union[str, Any] = idalabel
lowercase_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( a : dict , a : YolosConfig , a : bool = False ) -> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : Any = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
lowercase_ : str = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : List[Any] = in_proj_weight[: config.hidden_size, :]
lowercase_ : Any = in_proj_bias[: config.hidden_size]
lowercase_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : List[Any] = in_proj_weight[-config.hidden_size :, :]
lowercase_ : Tuple = in_proj_bias[-config.hidden_size :]
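    # In effect, the fused timm qkv matrix of shape (3 * hidden_size, hidden_size)
    # is sliced row-wise into query ([0, h)), key ([h, 2h)) and value ([2h, 3h)).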
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
if "backbone" in name:
lowercase_ : str = name.replace('backbone' , 'vit' )
if "cls_token" in name:
lowercase_ : Optional[int] = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
lowercase_ : Any = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
lowercase_ : int = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
lowercase_ : Tuple = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowercase_ : Optional[int] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
lowercase_ : Optional[Any] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowercase_ : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase_ : Any = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase_ : Tuple = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase_ : List[str] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase_ : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase_ : str = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
lowercase_ : Optional[int] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
lowercase_ : Optional[Any] = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
lowercase_ : Optional[Any] = name.replace('vit.norm' , 'vit.layernorm' )
return name
def _UpperCAmelCase ( a : dict , a : YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase_ : List[str] = orig_state_dict.pop(a )
if "qkv" in key:
lowercase_ : Optional[Any] = key.split('.' )
lowercase_ : Tuple = int(key_split[2] )
lowercase_ : Dict = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase_ : str = val[:dim, :]
lowercase_ : Optional[Any] = val[
dim : dim * 2, :
]
lowercase_ : List[str] = val[-dim:, :]
else:
lowercase_ : Tuple = val[:dim]
lowercase_ : Union[str, Any] = val[dim : dim * 2]
lowercase_ : Union[str, Any] = val[-dim:]
else:
lowercase_ : str = val
return orig_state_dict
def _UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
lowercase_ : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Tuple = Image.open(requests.get(a , stream=a ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( a : str , a : str , a : str , a : bool = False ) -> Dict:
"""simple docstring"""
lowercase_ : Tuple = get_yolos_config(a )
# load original state_dict
lowercase_ : Union[str, Any] = torch.load(a , map_location='cpu' )['model']
# load 🤗 model
lowercase_ : Tuple = YolosForObjectDetection(a )
model.eval()
lowercase_ : Tuple = convert_state_dict(a , a )
model.load_state_dict(a )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase_ : Optional[Any] = 8_0_0 if yolos_name != 'yolos_ti' else 5_1_2
lowercase_ : Any = YolosImageProcessor(format='coco_detection' , size=a )
lowercase_ : Any = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase_ : int = model(**a )
lowercase_ , lowercase_ : str = outputs.logits, outputs.pred_boxes
lowercase_ , lowercase_ : int = None, None
if yolos_name == "yolos_ti":
lowercase_ : Tuple = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowercase_ : Tuple = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowercase_ : List[str] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowercase_ : List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowercase_ : Union[str, Any] = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowercase_ : List[Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowercase_ : List[str] = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowercase_ : List[Any] = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowercase_ : Dict = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowercase_ : Optional[Any] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , a , atol=1e-4 )
Path(a ).mkdir(exist_ok=a )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(a )
if push_to_hub:
lowercase_ : List[str] = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
lowercase_ : Optional[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(a , organization='hustvl' )
model.push_to_hub(a , organization='hustvl' )
if __name__ == "__main__":
A: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A: Dict = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(a ):
if len(a ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a ) )
return data_lists
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : list[list[float]] = []
for dlist, weight in zip(a , a ):
lowercase_ : Tuple = min(a )
lowercase_ : Any = max(a )
lowercase_ : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowercase_ : str = f"Invalid weight of {weight:f} provided"
raise ValueError(a )
score_lists.append(a )
return score_lists
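# Worked example: dlist = [10, 20, 30] normalizes to [0.0, 0.5, 1.0] when its
# weight is 1, and to the complement [1.0, 0.5, 0.0] when its weight is 0.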
def _UpperCAmelCase ( a : list[list[float]] ) -> list[float]:
"""simple docstring"""
lowercase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a ):
lowercase_ : List[Any] = final_scores[j] + ele
return final_scores
def _UpperCAmelCase ( a : list[list[float]] , a : list[int] ) -> list[list[float]]:
"""simple docstring"""
lowercase_ : int = get_data(a )
lowercase_ : Optional[int] = calculate_each_score(a , a )
lowercase_ : Dict = generate_final_scores(a )
# append scores to source data
for i, ele in enumerate(a ):
source_data[i].append(a )
return source_data
| 7 | 1 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _UpperCAmelCase ( a : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if isinstance(a , a ):
raise TypeError('number of qubits must be a integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(a ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 1_0:
raise ValueError('number of qubits too large to simulate(>10).' )
lowercase_ : str = QuantumRegister(a , 'qr' )
lowercase_ : int = ClassicalRegister(a , 'cr' )
lowercase_ : Tuple = QuantumCircuit(a , a )
lowercase_ : Union[str, Any] = number_of_qubits
for i in range(a ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(a ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , a , a )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(a , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(a , a )
# simulate with 10000 shots
lowercase_ : int = Aer.get_backend('qasm_simulator' )
lowercase_ : Tuple = execute(a , a , shots=1_0_0_0_0 )
return job.result().get_counts(a )
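# Applied to the all-zero register, the QFT yields a uniform superposition, so
# the 10000 measured shots should split roughly evenly over all 2**n outcomes.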
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : int , a : int ) -> int:
"""simple docstring"""
while second != 0:
        lowercase_ : Any = first & second  # carry bits shared by both addends
        first ^= second  # partial sum without the carry
        second = lowercase_ << 1  # shift the carry into its next position
return first
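# Trace for add(5, 3): (first, second) goes (0b101, 0b011) -> (0b110, 0b010)
# -> (0b100, 0b100) -> (0b000, 0b1000) -> (0b1000, 0), returning 8 once no
# carry bits remain.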
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: Optional[int] = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
A: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
'''simple docstring'''
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Dict = n
lowercase_ : Dict = [None] * self.n
lowercase_ : Tuple = 0 # index of the first element
lowercase_ : List[Any] = 0
lowercase_ : List[Any] = 0
def __len__( self ) -> int:
return self.size
def lowerCamelCase__ ( self ) -> bool:
return self.size == 0
def lowerCamelCase__ ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def lowerCamelCase__ ( self , _lowercase ) -> Any:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
lowercase_ : Tuple = data
lowercase_ : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def lowerCamelCase__ ( self ) -> Any:
if self.size == 0:
raise Exception('UNDERFLOW' )
lowercase_ : Dict = self.array[self.front]
lowercase_ : Tuple = None
lowercase_ : int = (self.front + 1) % self.n
self.size -= 1
return temp
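# Hypothetical usage sketch (assumes the constructor binds n, array, front,
# rear and size as the instance attributes the methods above read):
#     q = __magic_name__(3)
#     q.enqueue('a').enqueue('b')
#     q.first()    # 'a'
#     q.dequeue()  # 'a'; dequeuing an empty queue raises Exception('UNDERFLOW')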
| 7 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 7 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
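# e.g. height = width = 300 with scale_factor = 8: 300 // 64 = 4 with a
# remainder, so new_height = new_width = 5 and (40, 40) is returned.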
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 1_27.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
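# Dividing by 127.5 and subtracting 1 maps uint8 pixel values from [0, 255]
# onto [-1, 1], the range the pipeline's post-processing step
# (image * 0.5 + 0.5) later inverts.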
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
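        # e.g. num_inference_steps = 100 with strength = 0.3 keeps only the
        # last 30 scheduler timesteps: the encoded image is noised to that
        # level and denoising starts from there.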
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 1 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
lowercase_ : List[str] = git.Repo(search_parent_directories=a )
lowercase_ : Union[str, Any] = {
'repo_id': str(a ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a , 'git_log.json' ) , 'w' ) as f:
json.dump(a , a , indent=4 )
def _UpperCAmelCase ( a : str ) -> Union[str, Any]:
"""simple docstring"""
if params.n_gpu <= 0:
lowercase_ : int = 0
lowercase_ : Union[str, Any] = -1
lowercase_ : List[str] = True
lowercase_ : Optional[Any] = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase_ : Dict = int(os.environ['WORLD_SIZE'] )
lowercase_ : Union[str, Any] = int(os.environ['N_GPU_NODE'] )
lowercase_ : Optional[int] = int(os.environ['RANK'] )
# number of nodes / node ID
lowercase_ : int = params.world_size // params.n_gpu_per_node
lowercase_ : str = params.global_rank // params.n_gpu_per_node
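        # e.g. WORLD_SIZE=16 with 8 GPUs per node gives n_nodes = 2; global
        # ranks 0-7 map to node 0 and ranks 8-15 to node 1.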
lowercase_ : Dict = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase_ : str = 1
lowercase_ : Dict = 0
lowercase_ : Tuple = 0
lowercase_ : List[Any] = 0
lowercase_ : int = 1
lowercase_ : Tuple = 1
lowercase_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase_ : List[str] = params.node_id == 0 and params.local_rank == 0
lowercase_ : Optional[Any] = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( a : Dict ) -> Optional[int]:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A: Dict = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
A: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
| 7 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
lowercase_ : Any = ''
while len(a ) % 3 != 0:
lowercase_ : str = '0' + bin_string
lowercase_ : Any = [
bin_string[index : index + 3]
for index in range(len(a ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
lowercase_ : Optional[Any] = 0
for index, val in enumerate(a ):
oct_val += int(2 ** (2 - index) * int(a ) )
oct_string += str(a )
return oct_string
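# Worked example: "1111000" is left-padded to "001111000", split into
# ["001", "111", "000"], and each 3-bit group maps to one octal digit: "170".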
if __name__ == "__main__":
from doctest import testmod
testmod()
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( a : int ) -> bool:
"""simple docstring"""
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
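# e.g. 8 is 0b1000 and 7 is 0b0111, which share no set bits, so 8 & 7 == 0;
# note the check also returns True for 0, since 0 & -1 == 0.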
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
# if input_string is "aba" than new_input_string become "a|b|a"
lowercase_ : Dict = ''
lowercase_ : Any = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
lowercase_ , lowercase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase_ : List[Any] = [1 for i in range(len(a ) )]
# for each character in new_string find corresponding palindromic string
lowercase_ : Dict = 0
for j in range(len(a ) ):
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
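# Worked example: "aba" is interleaved to "a|b|a"; expanding around the centre
# 'b' covers the whole interleaved string (length 5), and stripping the '|'
# separators recovers "aba" as the longest palindromic substring.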
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
A: Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = SpeechTaTokenizer
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Dict = SpeechTaTokenizer(_lowercase )
lowercase_ : Tuple = AddedToken('<mask>' , lstrip=_lowercase , rstrip=_lowercase )
lowercase_ : List[str] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self , _lowercase ) -> List[str]:
lowercase_ : Union[str, Any] = 'this is a test'
lowercase_ : Dict = 'this is a test'
return input_text, output_text
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , _lowercase=20 , _lowercase=5 ) -> List[Any]:
lowercase_ , lowercase_ : Optional[Any] = self.get_input_output_texts(_lowercase )
lowercase_ : Any = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
lowercase_ : Optional[int] = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase )
return text, ids
def lowerCamelCase__ ( self ) -> int:
lowercase_ : List[str] = '<pad>'
lowercase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def lowerCamelCase__ ( self ) -> List[Any]:
lowercase_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(_lowercase ) , 81 )
def lowerCamelCase__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Tuple = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowercase_ : List[Any] = tokenizer.vocab_size
lowercase_ : List[str] = len(_lowercase )
self.assertNotEqual(_lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowercase_ : int = ['aaaaa bbbbbb', 'cccccccccdddddddd']
lowercase_ : str = tokenizer.add_tokens(_lowercase )
lowercase_ : Optional[int] = tokenizer.vocab_size
lowercase_ : Optional[int] = len(_lowercase )
self.assertNotEqual(_lowercase , 0 )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , len(_lowercase ) )
self.assertEqual(_lowercase , all_size + len(_lowercase ) )
lowercase_ : Any = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_lowercase )
self.assertGreaterEqual(len(_lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowercase_ : Tuple = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
lowercase_ : Tuple = tokenizer.add_special_tokens(_lowercase )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : List[Any] = len(_lowercase )
self.assertNotEqual(_lowercase , 0 )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , len(_lowercase ) )
self.assertEqual(_lowercase , all_size_a + len(_lowercase ) )
lowercase_ : Union[str, Any] = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_lowercase )
self.assertGreaterEqual(len(_lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCamelCase__ ( self ) -> Optional[int]:
pass
def lowerCamelCase__ ( self ) -> Optional[Any]:
pass
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Tuple = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(_lowercase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
lowercase_ : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
# fmt: off
self.assertListEqual(_lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def lowerCamelCase__ ( self ) -> str:
# Use custom sequence because this tokenizer does not handle numbers.
lowercase_ : List[Any] = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
lowercase_ : Tuple = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=sequences , )
| 7 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem ( AbstractFileSystem ):
    """simple docstring"""
    root_marker = ''
    protocol = 'hf-legacy' # "hf://"" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ) -> None:
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ) -> None:
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
    def info( self , path , **kwargs ) -> Tuple:
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ) -> List[str]:
        self._get_dirs()
        path = PurePosixPath(path.strip('/' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out )
| 7 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args ( ) -> Tuple:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.' )
    parser.add_argument(
        '--pretrained_model_config' , type=str , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
    parser.add_argument(
        '--tokenizer' , type=str , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
    parser.add_argument(
        '--per_replica_batch_size' , type=int , default=8 , help='Batch size per TPU core.' , )
    parser.add_argument(
        '--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
    parser.add_argument(
        '--tpu_name' , type=str , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
    parser.add_argument(
        '--tpu_zone' , type=str , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
    parser.add_argument(
        '--gcp_project' , type=str , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
    parser.add_argument(
        '--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
    parser.add_argument(
        '--train_dataset' , type=str , help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
    parser.add_argument(
        '--shuffle_buffer_size' , type=int , default=2**1_8 , help='Size of the shuffle buffer (in samples)' , )
    parser.add_argument(
        '--eval_dataset' , type=str , help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of epochs to train for.' , )
    parser.add_argument(
        '--learning_rate' , type=float , default=1e-4 , help='Learning rate to use for training.' , )
    parser.add_argument(
        '--weight_decay_rate' , type=float , default=1e-3 , help='Weight decay rate to use for training.' , )
    parser.add_argument(
        '--max_length' , type=int , default=5_1_2 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
    parser.add_argument(
        '--mlm_probability' , type=float , default=0.15 , help='Fraction of tokens to mask during training.' , )
    parser.add_argument('--output_dir' , type=str , required=True , help='Path to save model checkpoints to.' )
    parser.add_argument('--hub_model_id' , type=str , help='Model ID to upload to on the Hugging Face Hub.' )
    args = parser.parse_args()
    return args
def initialize_tpu ( args ) -> Union[str, Any]:
    """simple docstring"""
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
            '--gcp_project. When running on a TPU VM, use --tpu_name local.' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples ( file_list ) -> int:
    """simple docstring"""
    num_samples = 0
    for file in file_list:
        filename = file.split('/' )[-1]
        sample_count = re.search(R'-\d+-(\d+)\.tfrecord' , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
def prepare_dataset ( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ) -> tf.data.Dataset:
    """simple docstring"""
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main ( args ) -> None:
    """simple docstring"""
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['accuracy'] )
    def decode_fn(sample ):
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(sample , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='tf' )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'] , tf.bool )
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'] , batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
A: Tuple = parse_args()
main(args)
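# Hedged example invocation (hypothetical bucket paths and script name, shown only to
# illustrate the flags defined in parse_args above):
#
#   python run_mlm_tpu.py \
#       --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext --pretrained_model_config roberta-base \
#       --output_dir ./mlm_output --bfloat16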
| 7 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def create_rename_keys ( config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> List[str]:
"""simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v ( state_dict , config ) -> Dict:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        prefix = 'vilt.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
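# Minimal sketch of the fused-qkv split performed above (hypothetical size, illustration only):
# a (3 * hidden, hidden) projection is cut into equal query / key / value blocks.
#
#   import torch
#   hidden = 4
#   qkv = torch.randn(3 * hidden, hidden)
#   q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]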
def remove_classification_head_ ( state_dict ) -> None:
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ) -> Tuple:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint ( checkpoint_url , pytorch_dump_folder_path ) -> Optional[Any]:
    """simple docstring"""
    config = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3_1_2_9
        repo_id = 'huggingface/label-files'
        filename = 'vqa2-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: 'False', 1: 'True'}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('Unknown model type' )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['state_dict']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ['itm_score.fc.weight', 'itm_score.fc.bias']
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=3_8_4 )
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_a = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=True ).raw )
        image_b = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=True ).raw )
        text = (
            'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
            ' standing.'
        )
        encoding_a = processor(image_a , text , return_tensors='pt' )
        encoding_b = processor(image_b , text , return_tensors='pt' )
        outputs = model(
            input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_2=encoding_b.pixel_values , )
    else:
        image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=True ).raw )
        if mlm_model:
            text = 'a bunch of [MASK] laying on a [MASK].'
        else:
            text = 'How many cats are there?'
        encoding = processor(image , text , return_tensors='pt' )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 1_1, 3_0_5_2_2] )
        expected_slice = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1e-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_1_2_9] )
        expected_slice = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.87_21, 2.12_91] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model and processor to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline ( DiffusionPipeline ):
"""simple docstring"""
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['derivative'] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
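# Hedged usage sketch (assumes a trained Karras-VE checkpoint; the repo id below is hypothetical
# and not part of the original file):
#
#   unet = UNet2DModel.from_pretrained("some/karras-ve-checkpoint")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]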
| 7 |
'''simple docstring'''
def cocktail_shaker_sort ( unsorted : list ) -> list:
    """simple docstring"""
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        # backward pass: bubble the smallest remaining element to the left
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # forward pass: bubble the largest remaining element to the right
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
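# Example (illustrative values):
# >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
# [1, 2, 2, 4, 5]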
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 1 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays ( nums_a : list[float] , nums_b : list[float] ) -> float:
    """simple docstring"""
    all_numbers = sorted(nums_a + nums_b )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
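# Examples (illustrative values):
# >>> median_of_two_arrays([1.0, 3.0], [2.0])
# 2.0
# >>> median_of_two_arrays([1.0, 2.0], [3.0, 4.0])
# 2.5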
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
| 7 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['transformers', 'torch', 'note_seq']
    def __init__( self , *args , **kwargs ) -> Dict:
        requires_backends(self , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_config( cls , *args , **kwargs ) -> List[str]:
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ) -> Dict:
        requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
A: Optional[Any] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
A: int = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
A: int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate ( item : str , main_target : str ) -> tuple[str, float]:
    """simple docstring"""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover ( parent_a : str , parent_b : str ) -> tuple[str, str]:
    """simple docstring"""
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate ( child : str , genes : list[str] ) -> str:
    """simple docstring"""
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select ( parent_a : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] , ) -> list[str]:
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_0_0 ) + 1
    child_n = 1_0 if child_n >= 1_0 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic ( target : str , genes : list[str] , debug : bool = True ) -> tuple[int, int, str]:
    """simple docstring"""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
    genes_list = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    generation , population , target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 7 |
'''simple docstring'''
def jaro_winkler ( str_a : str , str_b : str ) -> float:
    """simple docstring"""
    def get_matched_characters(_stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"
        return "".join(matched )

    # matching characters
    matching_a = get_matched_characters(str_a , str_b )
    matching_b = get_matched_characters(str_b , str_a )
    match_count = len(matching_a )
    # transposition: matched characters that appear in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a )
                + match_count / len(str_b )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4] , str_b[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
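# Example (a standard reference pair; here jaro = 17/18 with a common prefix of 3, so the
# result is approximately 0.9611):
# >>> round(jaro_winkler("martha", "marhta"), 4)
# 0.9611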
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 1 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A: Optional[int] = logging.get_logger(__name__)
class ByT5Tokenizer ( PreTrainedTokenizer ):
"""simple docstring"""
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , _lowercase="</s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase=125 , _lowercase=None , **_lowercase , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ : Dict = [f"<extra_id_{i}>" for i in range(_lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase_ : Union[str, Any] = len(set(filter(lambda _lowercase : bool('extra_id' in str(_lowercase ) ) , _lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
lowercase_ : Optional[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
lowercase_ : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
lowercase_ : Any = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
super().__init__(
eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , extra_ids=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
lowercase_ : Dict = extra_ids
lowercase_ : List[str] = 2**8 # utf is 8 bits
# define special tokens dict
lowercase_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
lowercase_ : Optional[Any] = len(self.special_tokens_encoder )
lowercase_ : Optional[int] = len(_lowercase )
for i, token in enumerate(_lowercase ):
lowercase_ : Dict = self.vocab_size + i - n
lowercase_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size( self ) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def _add_eos_if_not_present( self , token_ids ) -> List[int]:
if len(_lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def _tokenize( self , text ) -> List[str]:
        tokens = [chr(i ) for i in text.encode('utf-8' )]
        return tokens
    def _convert_token_to_id( self , token ) -> int:
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def _convert_id_to_token( self , index ) -> str:
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        bstring = B''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('utf-8' )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode('utf-8' , errors='ignore' )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
return ()
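# Byte-level id mapping sketch (follows from the offsets above: pad=0, eos=1, unk=2, then each
# byte b maps to id b + 3):
# >>> tokenizer = ByT5Tokenizer()
# >>> tokenizer._convert_token_to_id("h")  # ord("h") == 104
# 107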
| 7 |
'''simple docstring'''
from __future__ import annotations
def make_matrix ( row_size : int = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90 ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180 ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270 ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column ( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix ( matrix : list[list[int]] ) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i )
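# Examples (uses the helpers defined above):
# >>> make_matrix(2)
# [[1, 2], [3, 4]]
# >>> rotate_90(make_matrix(2))
# [[2, 4], [1, 3]]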
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 7 | 1 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _UpperCAmelCase ( split_dict : SplitDict ) -> Union[str, Any]:
    """simple docstring"""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def _UpperCAmelCase ( split_info : SplitInfo ) -> int:
"""simple docstring"""
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 7 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang ) -> None:
"""simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(f"Generating {path}" )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(lowercase_ ) # the model card text assembled above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    model_type , src_lang , tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
A: Optional[int] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=26_7735 , cutoffs=[2_0000, 4_0000, 20_0000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> Tuple:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 7 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log ( folder_path : str ) -> None:
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , 'git_log.json' ) , 'w' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params ( params ) -> None:
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'] )
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'] )
        params.global_rank = int(os.environ['RANK'] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
lowercase_ : int = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed(args):
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
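# Minimal usage sketch (illustrative only): the training scripts pass an
# argparse namespace, but any object with `.seed` and `.n_gpu` attributes works.
if __name__ == "__main__":
    from types import SimpleNamespace

    set_seed(SimpleNamespace(seed=42, n_gpu=0))
    print(np.random.rand())  # deterministic across runs with the same seed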
| 7 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Tuple = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
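# Note: with _LazyModule installed in sys.modules, importing this package stays
# cheap; the torch/tf/flax submodules declared above are only loaded on first
# attribute access, e.g. (illustrative):
#
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderConfig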
| 7 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys , default ):
"""simple docstring"""
for e in env_keys:
val = int(os.environ.get(e , -1 ) )
if val >= 0:
return val
return default
def parse_flag_from_env(key , default=False ):
"""simple docstring"""
value = os.environ.get(key , str(default ) )
return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key , default="no" ):
"""simple docstring"""
value = os.environ.get(key , str(default ) )
return value
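# Quick demonstration (the environment variable names below are made up):
if __name__ == "__main__":
    os.environ["MY_FLAG"] = "1"
    print(parse_flag_from_env("MY_FLAG"))  # True
    print(get_int_from_env(["UNSET_A", "UNSET_B"], default=3))  # 3 (fallback)
    print(parse_choice_from_env("UNSET_CHOICE", default="no"))  # 'no'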
| 7 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray , mat_b: np.ndarray , mat_c: np.ndarray , pseudo_inv: np.ndarray | None = None , ) -> np.ndarray:
"""simple docstring"""
shape_a = np.shape(mat_a )
shape_b = np.shape(mat_b )
shape_c = np.shape(mat_c )
if shape_a[0] != shape_b[0]:
msg = (
'Expected the same number of rows for A and B. '
f"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(msg )
if shape_b[1] != shape_c[1]:
msg = (
'Expected the same number of columns for B and C. '
f"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(msg )
a_inv = pseudo_inv
if a_inv is None:
try:
a_inv = np.linalg.inv(mat_a )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
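# Worked example (illustrative): for the block matrix [[A, B], [B.T, C]],
# the Schur complement of A is C - B.T @ inv(A) @ B, and
# det(block) == det(A) * det(S).
#
# >>> a = np.array([[2.0, 1.0], [1.0, 2.0]])
# >>> b = np.array([[1.0], [0.0]])
# >>> c = np.array([[3.0]])
# >>> schur_complement(a, b, c)  # 3 - 2/3 = 7/3
# array([[2.33333333]])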
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def test_schur_complement( self ) -> None:
a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
b = np.array([[0, 3], [3, 0], [2, 3]] )
c = np.array([[2, 1], [6, 3]] )
s = schur_complement(a , b , c )
input_matrix = np.block([[a, b], [b.T, c]] )
det_x = np.linalg.det(input_matrix )
det_a = np.linalg.det(a )
det_s = np.linalg.det(s )
self.assertAlmostEqual(det_x , det_a * det_s )
def test_improper_a_b_dimensions( self ) -> None:
a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
b = np.array([[0, 3], [3, 0], [2, 3]] )
c = np.array([[2, 1], [6, 3]] )
with self.assertRaises(ValueError ):
schur_complement(b , a , c )
def test_improper_b_c_dimensions( self ) -> None:
a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
b = np.array([[0, 3], [3, 0], [2, 3]] )
c = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(ValueError ):
schur_complement(a , b , c )
schur_complement(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 7 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label , label , num_labels , ignore_index , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
label[label == old_id] = new_id
# turn into Numpy arrays
pred_label = np.array(pred_label )
label = np.array(label )
if reduce_labels:
label[label == 0] = 2_5_5
label = label - 1
label[label == 2_5_4] = 2_5_5
mask = label != ignore_index
mask = np.not_equal(label , ignore_index )
pred_label = pred_label[mask]
label = np.array(label )[mask]
intersect = pred_label[pred_label == label]
area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results , gt_seg_maps , num_labels , ignore_index , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
"""simple docstring"""
total_area_intersect = np.zeros((num_labels,) , dtype=np.floataa )
total_area_union = np.zeros((num_labels,) , dtype=np.floataa )
total_area_pred_label = np.zeros((num_labels,) , dtype=np.floataa )
total_area_label = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(results , gt_seg_maps ):
area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results , gt_seg_maps , num_labels , ignore_index , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
"""simple docstring"""
total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
# compute metrics
metrics = {}
all_acc = total_area_intersect.sum() / total_area_label.sum()
iou = total_area_intersect / total_area_union
acc = total_area_intersect / total_area_label
metrics['mean_iou'] = np.nanmean(iou )
metrics['mean_accuracy'] = np.nanmean(acc )
metrics['overall_accuracy'] = all_acc
metrics['per_category_iou'] = iou
metrics['per_category_accuracy'] = acc
if nan_to_num is not None:
metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num=None , label_map=None , reduce_labels=False , ):
iou_result = mean_iou(
results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
return iou_result
return iou_result
| 7 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
A: Dict = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , *args , **kwargs ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs )
| 7 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def atol_for_validation( self ) -> float:
return 1E-4
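# Minimal usage sketch (illustrative; the mangled class names above stand in
# for transformers' ViTConfig / ViTOnnxConfig):
#
#     config = ViTConfig(image_size=384, patch_size=32)
#     assert config.hidden_size == 768  # untouched defaults are preserved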
| 7 | 1 |
'''simple docstring'''
def price_plus_tax(price: float , tax_rate: float ) -> float:
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(1_0_0, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , vocab_size=5_0257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , attention_softmax_in_fpaa=True , scale_attention_softmax_in_fpaa=True , multi_query=True , **kwargs , ):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
self.multi_query = multi_query
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 7 | 1 |
'''simple docstring'''
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3
def rabin_karp(pattern: str , text: str ) -> bool:
"""simple docstring"""
p_len = len(pattern )
t_len = len(text )
if p_len > t_len:
return False
p_hash = 0
text_hash = 0
modulus_power = 1
# Calculating the hash of pattern and substring of text
for i in range(p_len ):
p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
modulus_power = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
text_hash = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
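# Usage sketch (hypothetical strings): the rolling hash lets every window of
# `text` be compared against `pattern` in amortized O(1), so the whole search
# runs in expected O(len(pattern) + len(text)).
#
# >>> rabin_karp("abc", "xxabcxx")
# True
# >>> rabin_karp("abd", "xxabcxx")
# False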
def test_rabin_karp() -> None:
"""simple docstring"""
# Test 1)
pattern = 'abc1abc12'
text_a = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
text_b = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(pattern , text_a ) and not rabin_karp(pattern , text_b )
# Test 2)
pattern = 'ABABX'
text = 'ABABZABABYABABX'
assert rabin_karp(pattern , text )
# Test 3)
pattern = 'AAAB'
text = 'ABAAAAAB'
assert rabin_karp(pattern , text )
# Test 4)
pattern = 'abcdabcy'
text = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(pattern , text )
# Test 5)
pattern = 'Lü'
text = 'Lüsai'
assert rabin_karp(pattern , text )
pattern = 'Lue'
assert not rabin_karp(pattern , text )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 7 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def test_all_is_compatible( self ):
filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(filenames ) )
def test_diffusers_model_is_compatible( self ):
filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(filenames ) )
def test_diffusers_model_is_not_compatible( self ):
filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(filenames ) )
def test_transformer_model_is_compatible( self ):
filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(filenames ) )
def test_transformer_model_is_not_compatible( self ):
filenames = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(filenames ) )
def test_all_is_compatible_variant( self ):
filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
variant = 'fp16'
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def test_diffusers_model_is_compatible_variant( self ):
filenames = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
variant = 'fp16'
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def test_diffusers_model_is_compatible_variant_partial( self ):
# pass variant but use the non-variant filenames
filenames = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
variant = 'fp16'
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def test_diffusers_model_is_not_compatible_variant( self ):
filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
variant = 'fp16'
self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
def test_transformer_model_is_compatible_variant( self ):
filenames = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
variant = 'fp16'
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def test_transformer_model_is_compatible_variant_partial( self ):
# pass variant but use the non-variant filenames
filenames = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
variant = 'fp16'
self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
def test_transformer_model_is_not_compatible_variant( self ):
filenames = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
variant = 'fp16'
self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
| 7 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def atol_for_validation( self ) -> float:
return 1E-4
| 7 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def _UpperCAmelCase ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(a , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : List[Any] = f.readlines()
lowercase_ : Dict = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
lowercase_ : Dict = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(a , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
args = parser.parse_args()
update_custom_js(args.version)
| 7 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Optional[int] = logging.get_logger(__name__)
A: Optional[Any] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'xmod'
def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
self.pre_norm = pre_norm
self.adapter_reduction_factor = adapter_reduction_factor
self.adapter_layer_norm = adapter_layer_norm
self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
self.ln_before_adapter = ln_before_adapter
self.languages = list(languages )
self.default_language = default_language
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 7 |
'''simple docstring'''
def get_data(source_data: list[list[float]] ) -> list[list[float]]:
"""simple docstring"""
data_lists: list[list[float]] = []
for data in source_data:
for i, el in enumerate(data ):
if len(data_lists ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(el ) )
return data_lists
def calculate_each_score(data_lists: list[list[float]] , weights: list[int] ) -> list[list[float]]:
"""simple docstring"""
score_lists: list[list[float]] = []
for dlist, weight in zip(data_lists , weights ):
mind = min(dlist )
maxd = max(dlist )
score: list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
msg = f"Invalid weight of {weight:f} provided"
raise ValueError(msg )
score_lists.append(score )
return score_lists
def generate_final_scores(score_lists: list[list[float]] ) -> list[float]:
"""simple docstring"""
final_scores: list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(slist ):
final_scores[j] = final_scores[j] + ele
return final_scores
def procentual_proximity(source_data: list[list[float]] , weights: list[int] ) -> list[list[float]]:
"""simple docstring"""
data_lists = get_data(source_data )
score_lists = calculate_each_score(data_lists , weights )
final_scores = generate_final_scores(score_lists )
# append scores to source data
for i, ele in enumerate(final_scores ):
source_data[i].append(ele )
return source_data
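if __name__ == "__main__":
    # Illustrative run (made-up data): weight 1 rewards larger values, weight 0
    # rewards smaller ones; each row gains its combined score as a new last column.
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))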
| 7 | 1 |
'''simple docstring'''
def depth_first_search(grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
"""simple docstring"""
row_length , col_length = len(grid ), len(grid[0] )
if (
min(row , col ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
count = 0
count += depth_first_search(grid , row + 1 , col , visit )
count += depth_first_search(grid , row - 1 , col , visit )
count += depth_first_search(grid , row , col + 1 , visit )
count += depth_first_search(grid , row , col - 1 , visit )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
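# Illustrative call: a 2x2 open grid has exactly two simple paths from the
# top-left corner to the bottom-right one.
print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # 2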
| 7 |
'''simple docstring'''
def add(first: int , second: int ) -> int:
"""simple docstring"""
# Note: with Python's unbounded ints this carry-propagation loop assumes
# non-negative inputs.
while second != 0:
# carry bits, shifted left for the next iteration
c = first & second
first ^= second
second = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = int(input("Enter the first number: ").strip())
A: Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 7 | 1 |
'''simple docstring'''
A: str = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 7 |
'''simple docstring'''
class __magic_name__ :
"""simple docstring"""
def __init__( self , n ):
self.n = n
self.array = [None] * self.n
self.front = 0  # index of the first element
self.rear = 0
self.size = 0
def __len__( self ) -> int:
return self.size
def is_empty( self ) -> bool:
return self.size == 0
def first( self ):
return False if self.is_empty() else self.array[self.front]
def enqueue( self , data ):
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
self.array[self.rear] = data
self.rear = (self.rear + 1) % self.n
self.size += 1
return self
def dequeue( self ):
if self.size == 0:
raise Exception('UNDERFLOW' )
temp = self.array[self.front]
self.array[self.front] = None
self.front = (self.front + 1) % self.n
self.size -= 1
return temp
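if __name__ == "__main__":
    # Quick FIFO demonstration with wrap-around (the mangled class name above
    # is used as-is).
    queue = __magic_name__(3)
    queue.enqueue(1).enqueue(2)
    print(queue.first())    # 1
    print(queue.dequeue())  # 1
    queue.enqueue(3).enqueue(4)  # rear wraps around the fixed-size array
    print(len(queue))       # 3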
| 7 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (DPMSolverSinglestepScheduler,)
SCREAMING_SNAKE_CASE_ : Any = (('num_inference_steps', 2_5),)
def lowerCamelCase__ ( self , **_lowercase ) -> Tuple:
lowercase_ : Union[str, Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**_lowercase )
return config
def lowerCamelCase__ ( self , _lowercase=0 , **_lowercase ) -> Dict:
lowercase_ : Union[str, Any] = dict(self.forward_default_kwargs )
lowercase_ : List[Any] = kwargs.pop('num_inference_steps' , _lowercase )
lowercase_ : List[str] = self.dummy_sample
lowercase_ : Dict = 0.1 * sample
lowercase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ : Optional[int] = self.get_scheduler_config(**_lowercase )
lowercase_ : List[str] = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
lowercase_ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
lowercase_ : int = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
lowercase_ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ , lowercase_ : Any = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1 ):
lowercase_ : Optional[int] = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase_ : List[Any] = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self ) -> List[str]:
pass
def lowerCamelCase__ ( self , _lowercase=0 , **_lowercase ) -> List[str]:
lowercase_ : int = dict(self.forward_default_kwargs )
lowercase_ : int = kwargs.pop('num_inference_steps' , _lowercase )
lowercase_ : Any = self.dummy_sample
lowercase_ : Optional[Any] = 0.1 * sample
lowercase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase_ : List[str] = self.get_scheduler_config()
lowercase_ : Any = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase_ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
lowercase_ : Any = scheduler_class.from_pretrained(_lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residual (must be after setting timesteps)
lowercase_ : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase_ : Dict = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase_ : Union[str, Any] = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self , _lowercase=None , **_lowercase ) -> Optional[Any]:
if scheduler is None:
lowercase_ : Union[str, Any] = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config(**_lowercase )
lowercase_ : str = scheduler_class(**_lowercase )
lowercase_ : Optional[Any] = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config(**_lowercase )
lowercase_ : int = scheduler_class(**_lowercase )
lowercase_ : Tuple = 10
lowercase_ : List[Any] = self.dummy_model()
lowercase_ : Any = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ : str = model(_lowercase , _lowercase )
lowercase_ : Dict = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
return sample
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase_ : Tuple = 50
lowercase_ : int = self.dummy_model()
lowercase_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowercase_ : int = model(_lowercase , _lowercase )
lowercase_ : Any = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
lowercase_ : Any = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.25_74 ) < 1E-3
def lowerCamelCase__ ( self ) -> Dict:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def lowerCamelCase__ ( self ) -> List[str]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase_ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase_ : List[str] = self.full_loop(scheduler=_lowercase )
lowercase_ : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
lowercase_ : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
lowercase_ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase_ : int = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase_ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase_ : Any = self.full_loop(scheduler=_lowercase )
lowercase_ : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def lowerCamelCase__ ( self ) -> Dict:
self.check_over_configs(thresholding=_lowercase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='dpmsolver++' , solver_order=_lowercase , solver_type=_lowercase , )
def lowerCamelCase__ ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def lowerCamelCase__ ( self ) -> Dict:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
lowercase_ : Union[str, Any] = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase ).any(), "Samples have nan numbers"
def lowerCamelCase__ ( self ) -> int:
self.check_over_configs(lower_order_final=_lowercase )
self.check_over_configs(lower_order_final=_lowercase )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowerCamelCase__ ( self ) -> Tuple:
self.check_over_configs(variance_type=_lowercase )
self.check_over_configs(variance_type='learned_range' )
def lowerCamelCase__ ( self ) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0 )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Tuple = self.full_loop()
lowercase_ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[Any] = self.full_loop(use_karras_sigmas=_lowercase )
lowercase_ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.22_48 ) < 1E-3
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Union[str, Any] = self.full_loop(prediction_type='v_prediction' )
lowercase_ : str = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.14_53 ) < 1E-3
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=_lowercase )
lowercase_ : List[str] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 0.06_49 ) < 1E-3
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[int] = self.scheduler_classes[0]
lowercase_ : List[str] = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0 )
lowercase_ : Optional[int] = scheduler_class(**_lowercase )
lowercase_ : str = 10
lowercase_ : List[str] = self.dummy_model()
lowercase_ : int = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ : Optional[int] = model(_lowercase , _lowercase )
lowercase_ : int = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
assert sample.dtype == torch.floataa
| 7 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height , width , scale_factor=8 ):
"""simple docstring"""
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
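# Worked example (values chosen for illustration): with the default movq scale
# factor of 8, a 768x768 pixel request maps to a 96x96 grid:
#   768 // 8**2 = 12, then 12 * 8 = 96.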
def prepare_image(pil_image , w=5_1_2 , h=5_1_2 ):
"""simple docstring"""
pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
arr = np.array(pil_image.convert('RGB' ) )
arr = arr.astype(np.floataa ) / 1_27.5 - 1
arr = np.transpose(arr , [2, 0, 1] )
image = torch.from_numpy(arr ).unsqueeze(0 )
return image
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
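# image-to-image sampling: encode `image` to latents, noise them according to `strength`, then denoise conditioned on the image embeddings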
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 7 | 1 |
'''simple docstring'''
def _UpperCAmelCase ( a : int ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
A: List[Any] = int(input("Enter number: ").strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
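# map submodule names to their public symbols; _LazyModule defers the actual imports until first attribute access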
A: int = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def _UpperCAmelCase ( a : str = "AAPL" ) -> str:
"""simple docstring"""
lowercase_ : str = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowercase_ : List[str] = BeautifulSoup(requests.get(a ).text , 'html.parser' )
lowercase_ : str = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 7 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase ) -> Tuple:
return FSMTTokenizer.from_pretrained(_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Optional[int]:
lowercase_ : str = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(_lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def lowerCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
# note: this test does not measure best-case performance, since it only evaluates a small batch,
# but it should be enough to detect a regression in the output quality
lowercase_ : Optional[Any] = f"facebook/wmt19-{pair}"
lowercase_ : str = self.get_tokenizer(_lowercase )
lowercase_ : Any = self.get_model(_lowercase )
lowercase_ : Any = bleu_data[pair]['src']
lowercase_ : Any = bleu_data[pair]['tgt']
lowercase_ : Dict = tokenizer(_lowercase , return_tensors='pt' , truncation=_lowercase , padding='longest' ).to(_lowercase )
lowercase_ : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase_ : Any = tokenizer.batch_decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
lowercase_ : Union[str, Any] = calculate_bleu(_lowercase , _lowercase )
print(_lowercase )
self.assertGreaterEqual(scores['bleu'] , _lowercase )
| 7 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'speech_to_text'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _lowercase=1_0000 , _lowercase=12 , _lowercase=2048 , _lowercase=4 , _lowercase=6 , _lowercase=2048 , _lowercase=4 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=True , _lowercase=True , _lowercase="relu" , _lowercase=256 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=2 , _lowercase=True , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=6000 , _lowercase=1024 , _lowercase=2 , _lowercase=(5, 5) , _lowercase=1024 , _lowercase=80 , _lowercase=1 , **_lowercase , ) -> List[str]:
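# standard encoder-decoder transformer hyperparameters, plus settings for the convolutional subsampler that downsamples the speech features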
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : Optional[Any] = encoder_ffn_dim
lowercase_ : Tuple = encoder_layers
lowercase_ : int = encoder_attention_heads
lowercase_ : List[str] = decoder_ffn_dim
lowercase_ : List[str] = decoder_layers
lowercase_ : List[str] = decoder_attention_heads
lowercase_ : List[Any] = dropout
lowercase_ : List[Any] = attention_dropout
lowercase_ : List[str] = activation_dropout
lowercase_ : List[Any] = activation_function
lowercase_ : Any = init_std
lowercase_ : List[Any] = encoder_layerdrop
lowercase_ : Dict = decoder_layerdrop
lowercase_ : str = use_cache
lowercase_ : List[Any] = encoder_layers
lowercase_ : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Any = max_source_positions
lowercase_ : str = max_target_positions
lowercase_ : Dict = num_conv_layers
lowercase_ : List[str] = list(_lowercase )
lowercase_ : Optional[int] = conv_channels
lowercase_ : Optional[int] = input_feat_per_channel
lowercase_ : int = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A: int = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any]=1_0 ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for _ in range(a ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _UpperCAmelCase ( a : List[Any] , a : Union[str, Any]=1_0 ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Optional[Any] = []
for step in range(a ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Dict = os.path.join(a , 'schedule.bin' )
torch.save(scheduler.state_dict() , a )
lowercase_ : Tuple = torch.load(a )
scheduler.load_state_dict(a )
return lrs
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for a, b in zip(_lowercase , _lowercase ):
self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : int = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_lowercase )
lowercase_ : List[Any] = torch.tensor([0.4, 0.2, -0.5] )
lowercase_ : List[Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase_ : int = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
lowercase_ : Optional[Any] = criterion(_lowercase , _lowercase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : List[Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_lowercase )
lowercase_ : Optional[Any] = torch.tensor([0.4, 0.2, -0.5] )
lowercase_ : Dict = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase_ : Optional[Any] = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_lowercase , weight_decay=0.0 , relative_step=_lowercase , scale_parameter=_lowercase , warmup_init=_lowercase , )
for _ in range(1000 ):
lowercase_ : str = criterion(_lowercase , _lowercase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Linear(5_0, 5_0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE_ : Optional[Any] = AdamW(m.parameters(), lr=10.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1_0
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Union[str, Any]:
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for a, b in zip(_lowercase , _lowercase ):
self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase , msg=_lowercase )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : int = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
lowercase_ : Dict = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
lowercase_ , lowercase_ : Union[str, Any] = data
lowercase_ : Tuple = scheduler_func(self.optimizer , **_lowercase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowercase_ : Union[str, Any] = unwrap_schedule(_lowercase , self.num_steps )
self.assertListAlmostEqual(
_lowercase , _lowercase , tol=1E-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
lowercase_ : int = scheduler_func(self.optimizer , **_lowercase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_lowercase ) # wrap to test picklability of the schedule
lowercase_ : List[Any] = unwrap_and_save_reload_schedule(_lowercase , self.num_steps )
self.assertListEqual(_lowercase , _lowercase , msg=f"failed for {scheduler_func} in save and reload" )
class __magic_name__ :
"""simple docstring"""
def __init__( self , _lowercase ) -> int:
lowercase_ : Dict = fn
def __call__( self , *_lowercase , **_lowercase ) -> List[str]:
return self.fn(*_lowercase , **_lowercase )
@classmethod
def lowerCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
lowercase_ : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : Dict = 0
# if input_string is "aba" than new_input_string become "a|b|a"
lowercase_ : Dict = ''
lowercase_ : Any = ''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previous furthest-ending palindromic
# substring
lowercase_ , lowercase_ : Dict = 0, 0
# length[i] shows the length of palindromic substring with center i
lowercase_ : List[Any] = [1 for i in range(len(a ) )]
# for each character in new_string find corresponding palindromic string
lowercase_ : Dict = 0
for j in range(len(a ) ):
lowercase_ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase_ : int = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
lowercase_ : Tuple = j - k + 1 # noqa: E741
lowercase_ : Tuple = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase_ : Tuple = length[j]
lowercase_ : List[Any] = j
# create that string
lowercase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def _UpperCAmelCase ( a : Iterable[str] , a : int ) -> Generator[tuple[str, ...], None, None]:
"""simple docstring"""
lowercase_ : Union[str, Any] = iter(a )
while True:
lowercase_ : Optional[int] = tuple(itertools.islice(a , a ) )
if not chunk:
return
yield chunk
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
lowercase_ : int = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
lowercase_ : int = ''
if len(a ) < 2:
return dirty
for i in range(len(a ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(a ) & 1:
clean += "X"
return clean
def _UpperCAmelCase ( a : str ) -> list[str]:
"""simple docstring"""
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
lowercase_ : Tuple = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowercase_ : Any = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(a )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(a )
return table
def _UpperCAmelCase ( a : str , a : str ) -> str:
"""simple docstring"""
lowercase_ : List[str] = generate_table(a )
lowercase_ : Optional[Any] = prepare_input(a )
lowercase_ : Any = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(a , 2 ):
lowercase_ , lowercase_ : Tuple = divmod(table.index(a ) , 5 )
lowercase_ , lowercase_ : List[Any] = divmod(table.index(a ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _UpperCAmelCase ( a : str , a : str ) -> str:
"""simple docstring"""
lowercase_ : Optional[int] = generate_table(a )
lowercase_ : int = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(a , 2 ):
lowercase_ , lowercase_ : Union[str, Any] = divmod(table.index(a ) , 5 )
lowercase_ , lowercase_ : str = divmod(table.index(a ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 7 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowercase = None , _lowercase = None , **_lowercase , ) -> Optional[Any]:
super().__init__(self , **_lowercase )
lowercase_ : int = repo_info
lowercase_ : List[Any] = token
lowercase_ : Union[str, Any] = None
def lowerCamelCase__ ( self ) -> Optional[Any]:
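# lazily build a cache mapping every repo file and its parent directories to fsspec-style metadata entries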
if self.dir_cache is None:
lowercase_ : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase_ : str = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowercase ): {'name': str(_lowercase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self , _lowercase , _lowercase = "rb" , **_lowercase , ) -> Dict:
if not isinstance(self.repo_info , _lowercase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
lowercase_ : Optional[int] = hf_hub_url(self.repo_info.id , _lowercase , revision=self.repo_info.sha )
return fsspec.open(
_lowercase , mode=_lowercase , headers=get_authentication_headers_for_url(_lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase__ ( self , _lowercase , **_lowercase ) -> Tuple:
self._get_dirs()
lowercase_ : str = self._strip_protocol(_lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowercase )
def lowerCamelCase__ ( self , _lowercase , _lowercase=False , **_lowercase ) -> List[str]:
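# list the immediate children of `path` using the cached directory entries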
self._get_dirs()
lowercase_ : List[str] = PurePosixPath(path.strip('/' ) )
lowercase_ : List[str] = {}
for p, f in self.dir_cache.items():
lowercase_ : Tuple = PurePosixPath(p.strip('/' ) )
lowercase_ : Optional[int] = p.parent
if root == path:
lowercase_ : List[str] = f
lowercase_ : List[str] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 7 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = DebertaTokenizer
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = DebertaTokenizerFast
def lowerCamelCase__ ( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
lowercase_ : Any = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
lowercase_ : List[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowercase_ : int = {'unk_token': '[UNK]'}
lowercase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowercase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowercase ) )
def lowerCamelCase__ ( self , **_lowercase ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def lowerCamelCase__ ( self , _lowercase ) -> Any:
lowercase_ : str = 'lower newer'
lowercase_ : List[str] = 'lower newer'
return input_text, output_text
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = self.get_tokenizer()
lowercase_ : Any = 'lower newer'
lowercase_ : Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowercase_ : Optional[Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
lowercase_ : Any = tokens + [tokenizer.unk_token]
lowercase_ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = self.get_tokenizer()
lowercase_ : int = tokenizer('Hello' , 'World' )
lowercase_ : Tuple = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , _lowercase )
@slow
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : List[Any] = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowercase_ : Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=_lowercase )
lowercase_ : Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=_lowercase )
lowercase_ : Tuple = tokenizer.encode(
'sequence builders' , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
lowercase_ : Dict = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowercase )
lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Optional[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowercase_ : List[Any] = tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowercase_ : str = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
lowercase_ : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
lowercase_ : Optional[int] = [tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) for seq in encoding['input_ids']]
# fmt: off
lowercase_ : Any = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowercase_ : Any = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , _lowercase )
for expected, decoded in zip(_lowercase , _lowercase ):
self.assertEqual(_lowercase , _lowercase )
| 7 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 7 | 1 |
'''simple docstring'''
import re
def _UpperCAmelCase ( a : str ) -> str:
"""simple docstring"""
if len(re.findall('[ATCG]' , a ) ) != len(a ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
'''simple docstring'''
def _UpperCAmelCase ( a : list ) -> list:
"""simple docstring"""
for i in range(len(a ) - 1 , 0 , -1 ):
lowercase_ : Any = False
for j in range(a , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowercase_ , lowercase_ : Any = unsorted[j - 1], unsorted[j]
lowercase_ : int = True
for j in range(a ):
if unsorted[j] > unsorted[j + 1]:
lowercase_ , lowercase_ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowercase_ : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
A: Tuple = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7 | 1 |