"""TimmBackbone: a thin wrapper that exposes timm models through the Transformers backbone API."""

from typing import Optional, Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # timm initializes (or loads) the backbone weights itself, so there is nothing to do here.
        pass

    def forward(
        self,
        pixel_values: "Tensor",
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[BackboneOutput, Tuple["Tensor", ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
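
# Usage sketch (illustrative; the checkpoint name and input shape below are
# assumptions, not part of this file):
#
#     from transformers import TimmBackbone, TimmBackboneConfig
#     import torch
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     outputs = backbone(torch.randn(1, 3, 224, 224))
#     feature_maps = outputs.feature_maps  # one map per entry in config.out_indices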
"""ViViT (video vision transformer) model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
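
# Illustrative check (not from the original file): the defaults above describe
# the ViViT-B/16x2 architecture behind google/vivit-b-16x2-kinetics400.
#
#     config = VivitConfig()
#     assert (config.hidden_size, config.num_frames) == (768, 32)
#     assert config.tubelet_size == [2, 16, 16]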
"""Fetch the current Hacker News top stories and render them as a Markdown list."""

from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from Hacker News - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
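
# Each story dict returned by the API includes "title" and "url" fields, so the
# rendered Markdown consists of lines like the following (illustrative, not a
# recorded run):
#
#     * [Show HN: An example project](https://example.com)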
"""Tests for the MGP-STR tokenizer."""

import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_pretokenized_inputs(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_maximum_encoding_length_pair_input(self):
        pass
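
# Note: the vocabulary written in setUp mirrors MGP-STR's character set: two
# special tokens ([GO], [s]), the ten digits, and the 26 lowercase letters,
# 38 entries in total. That is why the all-lowercase "tester" round-trips
# unchanged in test_internal_consistency.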
"""Contrast stretching (histogram equalization) of a grayscale image using OpenCV."""

import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
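
# The mapping built in stretch() is the standard histogram-equalization
# transfer function: with L = 256 gray levels and p(j) the normalized
# histogram, level k is remapped to s_k = (L - 1) * sum(p(j) for j <= k),
# rounded to the nearest integer and cached in last_list.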
"""Kandinsky 2.2 image-to-image diffusion pipeline."""

from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
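
# Worked example (illustrative): with the default scale_factor=8, a requested
# 768x768 output gives 768 // 8**2 = 12 exactly, so the function returns
# (96, 96); dimensions that are not multiples of 64 are rounded up first.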

def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
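
# prepare_image maps 8-bit RGB values from [0, 255] into [-1, 1] via
# x / 127.5 - 1 and reorders HWC -> NCHW, the range and layout the MoVQ
# encoder expects.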

class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]): MoVQ decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""Scheduler exports for diffusers, gated on which optional backends are installed."""

from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler


try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )


try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler


try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
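
# Each try/except block above degrades gracefully: when an optional backend is
# missing, the matching dummy-objects module is star-imported instead, so the
# scheduler names still resolve but raise an informative error on use.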
"""Generate an RSA key pair and write the public and private keys to disk."""

import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists.\n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
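
# The generated pair satisfies the usual RSA relations: n = p * q,
# gcd(e, (p - 1) * (q - 1)) == 1, and d * e == 1 (mod (p - 1) * (q - 1)),
# so for any message m < n: pow(pow(m, e, n), d, n) == m.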
"""Return the mode(s) of a list: the most frequently occurring value(s)."""

from typing import Any


def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
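
# Illustrative example (not from the original file):
#
#     >>> mode([1, 2, 2, 3, 3])
#     [2, 3]
#
# Both 2 and 3 appear twice; the set comprehension de-duplicates the modes and
# sorted() orders the result.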
"""Tests for the PyTorch ViTMAE model."""

import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
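        # Worked example of the line above (illustrative): with the defaults
        # image_size=30, patch_size=2, mask_ratio=0.6, num_patches is 225 and
        # seq_length = ceil(0.4 * 226) = 91 visible tokens.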

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
"""Benchmark arguments specific to the PyTorch backend."""

from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
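
# Note: _setup_devices is a cached_property, so `device` and `n_gpu` probe
# CUDA/TPU availability once and then reuse the cached (device, n_gpu) tuple;
# `is_gpu` is simply `n_gpu > 0`.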
"""Ternary search over a sorted list, with iterative and recursive variants."""

from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in list. Returns -1 if element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_rec != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
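
# Complexity note: each ternary step discards at least a third of the interval,
# so both variants run in O(log n) steps before handing a sub-`precision`-sized
# interval to the linear scan.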
"""Monte Carlo estimation of pi and of areas under curves."""

from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square enclosing the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
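
# Accuracy note: Monte Carlo estimates converge at a rate of O(1/sqrt(N)) in
# the number of samples N, so each additional decimal digit of precision costs
# roughly 100x more iterations.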
"""Project Euler problem 29: count the distinct terms of a**b for 2 <= a, b <= n.

https://projecteuler.net/problem=29
"""


def solution(n: int = 100) -> int:
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
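
# Illustrative check (from the Project Euler 29 statement): for n = 5 there are
# exactly 15 distinct values of a**b with 2 <= a, b <= 5, so solution(5) == 15.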
"""RoBERTa model import structure, exposed as a lazy module."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
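
# At import time only the names listed in _import_structure are registered;
# _LazyModule defers importing the heavy torch/TF/Flax submodules until one of
# their attributes is first accessed.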
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
lowerCAmelCase__ = 'esm'
def __init__( self: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: List[Any]=1_026 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Dict="absolute" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,mask_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : str = use_cache
_lowerCamelCase : Union[str, Any] = emb_layer_norm_before
_lowerCamelCase : Tuple = token_dropout
_lowerCamelCase : Dict = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = EsmFoldConfig(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_lowerCamelCase : List[str] = get_default_vocab_list()
else:
_lowerCamelCase : Optional[Any] = vocab_list
else:
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,__lowerCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = super().to_dict()
if isinstance(self.esmfold_config ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.trunk is None:
_lowerCamelCase : Optional[int] = TrunkConfig()
elif isinstance(self.trunk ,__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = TrunkConfig(**self.trunk )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 4_8
lowerCAmelCase__ = 1_0_2_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Any ):
'''simple docstring'''
if self.structure_module is None:
_lowerCamelCase : Tuple = StructureModuleConfig()
elif isinstance(self.structure_module ,__lowerCAmelCase ):
_lowerCamelCase : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_lowerCamelCase : Optional[Any] = self.sequence_state_dim // self.sequence_head_width
_lowerCamelCase : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should be less than 0.4, got {self.dropout}.""" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 3_8_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 1E-8
lowerCAmelCase__ = 1E5
def _lowercase ( self: Any ):
'''simple docstring'''
return asdict(self )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
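# A hedged usage sketch (written against the un-aliased names EsmConfig, EsmFoldConfig
# and get_default_vocab_list that the bodies above reference): building a folding-model
# config exercises the nested-dataclass plumbing.
#
#   config = EsmConfig(vocab_size=33, is_folding_model=True)  # default EsmFoldConfig is created
#   config.vocab_list == get_default_vocab_list()             # warning path fills the ESM-2 vocab
#   config.to_dict()["esmfold_config"]                        # nested configs serialize as plain dicts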
| 46 | 1 |
"""simple docstring"""
_lowerCAmelCase : List[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCAmelCase : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCAmelCase : Optional[Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 46 |
"""simple docstring"""
import re
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError("Invalid Strand" )
    return _lowerCamelCase.translate(str.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
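# Worked example: the complement maps A<->T and C<->G, so "ATCG" becomes "TAGC" and
# "GTAT" becomes "CATA"; any non-ACGT character, e.g. in "GTN", raises
# ValueError("Invalid Strand").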
| 46 | 1 |
"""simple docstring"""
from math import sqrt
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = 0
for i in range(1 , int(sqrt(_lowerCamelCase ) + 1 ) ):
if n % i == 0 and i != sqrt(_lowerCamelCase ):
total += i + n // i
elif i == sqrt(_lowerCamelCase ):
total += i
return total - n
def lowerCamelCase_( _lowerCamelCase = 10000 ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = sum(
i
for i in range(1 , _lowerCamelCase )
if sum_of_divisors(sum_of_divisors(_lowerCamelCase ) ) == i and sum_of_divisors(_lowerCamelCase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
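# Sanity check with the classic amicable pair: the proper divisors of 220 sum to
# 1+2+4+5+10+11+20+22+44+55+110 = 284 and those of 284 sum to 1+2+4+71+142 = 220,
# so sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220 and both join the
# total; perfect numbers never qualify because of the sum_of_divisors(n) != n guard.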
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : Tuple = ""
else:
_lowerCamelCase : str = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Tuple = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Any = dct.pop(_lowerCamelCase )
_lowerCamelCase : Dict = val
def lowerCamelCase_( ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) -> str:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowerCamelCase : str = 8
# set labels if required
if not base_model:
_lowerCamelCase : str = 1000
_lowerCamelCase : Any = "huggingface/label-files"
_lowerCamelCase : Union[str, Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
        _lowerCamelCase : str = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowerCamelCase : int = 384
_lowerCamelCase : str = 1536
_lowerCamelCase : List[str] = 12
_lowerCamelCase : Optional[int] = 6
# load original model from torch hub
_lowerCamelCase : Union[str, Any] = torch.hub.load("facebookresearch/dino:main" , _lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[str] = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_lowerCamelCase : Tuple = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if base_model:
_lowerCamelCase : Optional[Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval()
else:
_lowerCamelCase : Union[str, Any] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_lowerCamelCase : Tuple = ViTImageProcessor()
_lowerCamelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Dict = encoding["pixel_values"]
_lowerCamelCase : int = model(_lowerCamelCase )
if base_model:
_lowerCamelCase : List[str] = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
_lowerCamelCase : Tuple = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
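# Example invocation (script and output paths are illustrative): with --base_model
# left at its set_defaults(True) value, only backbone weights are converted and the
# result is verified against the original model's CLS-token hidden state (atol=1e-1);
# classification checkpoints are instead compared on logits (atol=1e-3).
#
#   python convert_dino_to_vit.py --model_name dino_vitb8 --pytorch_dump_folder_path ./dino-vitb8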
| 46 | 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,"fake-roberta" )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase ,"config.json" ) ,"w" ) as f:
f.write(json.dumps({} ) )
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(type(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
class A_ ( _a ):
lowerCAmelCase__ = 'new-model'
try:
AutoConfig.register("new-model" ,__lowerCAmelCase )
# If remote code is not set, the default is to use local
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
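# A hedged sketch of the registration pattern these tests exercise (CustomConfig is
# the fixture imported above; any PretrainedConfig subclass with a distinct
# model_type behaves the same):
#
#   AutoConfig.register("custom", CustomConfig)      # extends CONFIG_MAPPING
#   cfg = CustomConfig()
#   cfg.save_pretrained(tmp_dir)
#   AutoConfig.from_pretrained(tmp_dir)              # now resolves to CustomConfig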
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ) -> np.ndarray:
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs ) -> np.ndarray:
    '''simple docstring'''
    # subtract the row-wise max before exponentiating for numerical stability
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class A_ ( _a ):
lowerCAmelCase__ = 'sigmoid'
lowerCAmelCase__ = 'softmax'
lowerCAmelCase__ = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class A_ ( _a ):
lowerCAmelCase__ = False
lowerCAmelCase__ = ClassificationFunction.NONE
def __init__( self: str ,**__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[Any]="" ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = tokenizer_kwargs
_lowerCamelCase : Optional[int] = {}
if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
_lowerCamelCase : Tuple = self.model.config.return_all_scores
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or top_k is None:
_lowerCamelCase : List[str] = top_k
_lowerCamelCase : Union[str, Any] = False
elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated; if you want similar functionality, use `top_k=None` instead"
                " of `return_all_scores=True`, or `top_k=1` instead of `return_all_scores=False`." ,__lowerCAmelCase ,)
if return_all_scores:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : Union[str, Any] = 1
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase : Dict = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: int ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = super().__call__(*__lowerCAmelCase ,**__lowerCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase : Optional[Any] = "top_k" not in kwargs
if isinstance(args[0] ,__lowerCAmelCase ) and _legacy:
            # This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = self.framework
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.tokenizer(**__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] ,__lowerCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Dict=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowerCamelCase : Dict = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowerCamelCase : List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
_lowerCamelCase : Optional[int] = self.model.config.function_to_apply
else:
_lowerCamelCase : str = ClassificationFunction.NONE
_lowerCamelCase : List[Any] = model_outputs["logits"][0]
_lowerCamelCase : Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowerCamelCase : str = sigmoid(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowerCamelCase : Optional[int] = softmax(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
_lowerCamelCase : str = outputs
else:
raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowerCamelCase : Optional[int] = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCAmelCase )
]
if not _legacy:
            dict_scores.sort(key=lambda x: x["score"] ,reverse=True )
if top_k is not None:
_lowerCamelCase : Any = dict_scores[:top_k]
return dict_scores
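# A hedged usage sketch (the checkpoint name is illustrative, not mandated by this
# file): postprocess() above applies sigmoid for multi-label or single-logit heads
# and softmax for single-label heads, and top_k=None returns every label's score.
#
#   from transformers import pipeline
#   clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   clf("I love this!")               # [{"label": "POSITIVE", "score": ...}]
#   clf("I love this!", top_k=None)   # all labels, sorted by score descending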
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A_ :
def __init__( self: List[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any]=13 ,__lowerCAmelCase: Union[str, Any]=7 ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: int=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Any=99 ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: Tuple=2 ,__lowerCAmelCase: Union[str, Any]=4 ,__lowerCAmelCase: Any=37 ,__lowerCAmelCase: Tuple="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Union[str, Any]=512 ,__lowerCAmelCase: Dict=16 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=3 ,__lowerCAmelCase: Union[str, Any]=4 ,__lowerCAmelCase: Any=None ,):
'''simple docstring'''
_lowerCamelCase : Dict = parent
_lowerCamelCase : str = 13
_lowerCamelCase : Dict = 7
_lowerCamelCase : Dict = True
_lowerCamelCase : str = True
_lowerCamelCase : int = True
_lowerCamelCase : Any = True
_lowerCamelCase : List[str] = 99
_lowerCamelCase : Tuple = 384
_lowerCamelCase : str = 2
_lowerCamelCase : int = 4
_lowerCamelCase : List[Any] = 37
_lowerCamelCase : int = "gelu"
_lowerCamelCase : Union[str, Any] = 0.1
_lowerCamelCase : Any = 0.1
_lowerCamelCase : Optional[Any] = 512
_lowerCamelCase : Union[str, Any] = 16
_lowerCamelCase : List[str] = 2
_lowerCamelCase : str = 0.02
_lowerCamelCase : Dict = 3
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : Optional[int] = 128
_lowerCamelCase : int = 2
_lowerCamelCase : List[Any] = 9
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = None
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCamelCase : Tuple = None
if self.use_input_mask:
_lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCamelCase : Dict = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCamelCase : str = ConvBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=__lowerCAmelCase ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self: List[str] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = TFConvBertModel(config=__lowerCAmelCase )
_lowerCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCamelCase : List[str] = [input_ids, input_mask]
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: str ,__lowerCAmelCase: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = TFConvBertForMaskedLM(config=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Dict = TFConvBertForSequenceClassification(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.num_choices
_lowerCamelCase : List[str] = TFConvBertForMultipleChoice(config=__lowerCAmelCase )
_lowerCamelCase : Dict = tf.tile(tf.expand_dims(__lowerCAmelCase ,1 ) ,(1, self.num_choices, 1) )
_lowerCamelCase : int = tf.tile(tf.expand_dims(__lowerCAmelCase ,1 ) ,(1, self.num_choices, 1) )
_lowerCamelCase : str = tf.tile(tf.expand_dims(__lowerCAmelCase ,1 ) ,(1, self.num_choices, 1) )
_lowerCamelCase : Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowercase ( self: Tuple ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Any ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : List[Any] = TFConvBertForTokenClassification(config=__lowerCAmelCase )
_lowerCamelCase : int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = TFConvBertForQuestionAnswering(config=__lowerCAmelCase )
_lowerCamelCase : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
        (
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
        ) = config_and_inputs
_lowerCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Dict = TFConvBertModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _lowercase ( self: str ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str = True
_lowerCamelCase : int = True
if hasattr(__lowerCAmelCase ,"use_cache" ):
_lowerCamelCase : Any = True
_lowerCamelCase : int = getattr(self.model_tester ,"encoder_seq_length" ,self.model_tester.seq_length )
_lowerCamelCase : List[str] = getattr(self.model_tester ,"key_length" ,__lowerCAmelCase )
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = len(model(__lowerCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase ,saved_model=__lowerCAmelCase )
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,"saved_model" ,"1" )
_lowerCamelCase : Dict = tf.keras.models.load_model(__lowerCAmelCase )
_lowerCamelCase : Any = model(__lowerCAmelCase )
if self.is_encoder_decoder:
_lowerCamelCase : Union[str, Any] = outputs["encoder_hidden_states"]
_lowerCamelCase : Union[str, Any] = outputs["encoder_attentions"]
else:
_lowerCamelCase : Optional[Any] = outputs["hidden_states"]
_lowerCamelCase : Tuple = outputs["attentions"]
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = getattr(
self.model_tester ,"expected_num_hidden_layers" ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) ,[self.model_tester.seq_length, self.model_tester.hidden_size] ,)
self.assertEqual(len(__lowerCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] ,)
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
        _lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = True
_lowerCamelCase : str = getattr(self.model_tester ,"decoder_seq_length" ,self.model_tester.seq_length )
_lowerCamelCase : List[str] = getattr(self.model_tester ,"encoder_seq_length" ,self.model_tester.seq_length )
_lowerCamelCase : int = getattr(self.model_tester ,"key_length" ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = getattr(self.model_tester ,"key_length" ,__lowerCAmelCase )
def check_decoder_attentions_output(__lowerCAmelCase: Optional[int] ):
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertEqual(out_len % 2 ,0 )
_lowerCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] ,)
def check_encoder_attentions_output(__lowerCAmelCase: Dict ):
_lowerCamelCase : Tuple = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] ,)
for model_class in self.all_model_classes:
_lowerCamelCase : int = True
_lowerCamelCase : Any = False
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[str] = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : List[str] = len(__lowerCAmelCase )
self.assertEqual(config.output_hidden_states ,__lowerCAmelCase )
check_encoder_attentions_output(__lowerCAmelCase )
if self.is_encoder_decoder:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
self.assertEqual(config.output_hidden_states ,__lowerCAmelCase )
check_decoder_attentions_output(__lowerCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
self.assertEqual(config.output_hidden_states ,__lowerCAmelCase )
check_encoder_attentions_output(__lowerCAmelCase )
# Check attention is always last and order is fine
_lowerCamelCase : Any = True
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model(self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) ,len(__lowerCAmelCase ) )
self.assertEqual(model.config.output_hidden_states ,__lowerCAmelCase )
check_encoder_attentions_output(__lowerCAmelCase )
@require_tf
class A_ ( unittest.TestCase ):
@slow
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_lowerCamelCase : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCamelCase : Dict = model(__lowerCAmelCase )[0]
_lowerCamelCase : Any = [1, 6, 768]
self.assertEqual(output.shape ,__lowerCAmelCase )
_lowerCamelCase : Dict = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,__lowerCAmelCase ,atol=1e-4 )
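# To run just this module (path follows the conventional transformers test layout,
# assumed here), something like:
#
#   pytest tests/models/convbert/test_modeling_tf_convbert.py -k "attention or integration"
#
# The integration test above pins the first 3x3 slice of YituTech/conv-bert-base's
# last hidden state to reference values at 1e-4 tolerance.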
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCAmelCase : Tuple = '''\
Text data.
Second line of data.'''
_lowerCAmelCase : str = '''file'''
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : List[str] = bytes(_lowerCamelCase , "utf-8" )
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , "w" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Tuple = str(Path(_lowerCamelCase ).resolve() )
assert cached_path(_lowerCamelCase ) == text_file
# relative path
_lowerCamelCase : Optional[int] = str(Path(_lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowerCamelCase ) == text_file
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : str = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
# relative path
_lowerCamelCase : List[Any] = "./__missing_file__.txt"
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 | 1 |
"""simple docstring"""
from PIL import Image
def lowerCamelCase_( _lowerCamelCase ) -> Image:
'''simple docstring'''
    _lowerCamelCase, _lowerCamelCase = image.size
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : str = image.load()
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
_lowerCamelCase : int = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(_lowerCamelCase ):
for i in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
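# A note on the algorithm (sketch, not part of the original module): this is global
# mean thresholding on a grayscale image; pixels strictly above the image-wide mean
# become 255 and the rest 0. For a 2x1 image with values (10, 200) the mean is 105,
# giving (0, 255). Histogram-based methods such as Otsu's choose the cutoff
# adaptively instead of using the plain mean.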
| 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
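# Example invocation (file names are illustrative): fire maps CLI arguments onto the
# (src_path, map_location, save_path) parameters, casting every tensor in the saved
# state dict to half precision; omitting --save_path overwrites the source in place.
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin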
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : List[Any] = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
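# The OptionalDependencyNotAvailable guards above degrade per backend: when a backend
# such as TensorFlow is absent, its class names simply never enter _import_structure,
# so importing the package stays cheap and a missing-backend failure only surfaces
# when the corresponding attribute is actually requested through _LazyModule.
# Torch-only environments, for instance, can still do `from transformers import
# ElectraModel` without pulling in TF or Flax.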
| 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,"fake-roberta" )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase ,"config.json" ) ,"w" ) as f:
f.write(json.dumps({} ) )
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(type(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
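# A minimal, self-contained sketch of the registration contract exercised by the
# test above (the `ToyConfig` name is hypothetical; only the public AutoConfig
# API already used in this file is assumed): the `model_type` passed to
# `register` must match the config class's own `model_type`.
def _toy_auto_config_registration_demo():
    from transformers import AutoConfig, PretrainedConfig
    from transformers.models.auto.configuration_auto import CONFIG_MAPPING

    class ToyConfig(PretrainedConfig):
        model_type = "toy"

    try:
        AutoConfig.register("toy", ToyConfig)
        config = AutoConfig.for_model("toy")  # resolved through the auto-API
        assert isinstance(config, ToyConfig)
    finally:
        # mirror the test's cleanup so repeated runs don't collide
        CONFIG_MAPPING._extra_content.pop("toy", None)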
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
class A_ ( _a ):
lowerCAmelCase__ = 'new-model'
try:
AutoConfig.register("new-model" ,__lowerCAmelCase )
# If remote code is not requested, the default is to use the local config
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote code is enabled, we load from the Hub
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
def __init__( self: Any ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any]=13 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: Tuple=3 ,__lowerCAmelCase: Dict=4 ,__lowerCAmelCase: int=[10, 20, 30, 40] ,__lowerCAmelCase: str=[2, 2, 3, 2] ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Optional[int]="gelu" ,__lowerCAmelCase: List[Any]=10 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: str=["stage2", "stage3", "stage4"] ,__lowerCAmelCase: str=[2, 3, 4] ,__lowerCAmelCase: Optional[Any]=None ,):
'''simple docstring'''
_lowerCamelCase : List[str] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : int = image_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Any = num_stages
_lowerCamelCase : Union[str, Any] = hidden_sizes
_lowerCamelCase : Union[str, Any] = depths
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : List[str] = num_labels
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Union[str, Any] = out_features
_lowerCamelCase : int = out_indices
_lowerCamelCase : Optional[int] = scope
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Optional[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCamelCase : int = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: int ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : int = ConvNextVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ConvNextVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: int ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = ConvNextVaBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Union[str, Any] = ConvNextVaBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Optional[int] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = ConvNextVaModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self: List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
pass
def _lowercase ( self: Any ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCamelCase : Union[str, Any] = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Any = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
_lowerCamelCase : Tuple = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCamelCase, _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCamelCase : Any = False
_lowerCamelCase : str = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_lowerCamelCase : List[Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
_lowerCamelCase : str = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCamelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(__lowerCAmelCase: Any ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any] ):
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Any = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Any = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> Any:
'''simple docstring'''
_lowerCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Dict ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = prepare_img()
_lowerCamelCase : Tuple = preprocessor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : str = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
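# A small, self-contained illustration of the spatial arithmetic behind the
# shape assertions above (not part of the test suite): the ConvNeXt-style stem
# downsamples by 4 and every later stage halves the resolution, so with four
# stages the feature maps sit at 1/4, 1/8, 1/16 and 1/32 of the input size.
def _expected_stage_resolutions(image_size: int, num_stages: int = 4) -> list:
    sizes = []
    stride = 4  # patchify stem
    for _ in range(num_stages):
        sizes.append(image_size // stride)
        stride *= 2
    return sizes

assert _expected_stage_resolutions(224) == [56, 28, 14, 7]  # 224/4 ... 224/32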
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
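# A minimal sketch of the optional-dependency pattern used above (illustrative
# only; the module and symbol names are hypothetical): the import is attempted
# lazily so a missing extra degrades gracefully, while the TYPE_CHECKING branch
# keeps the real symbols visible to static type checkers at no runtime cost.
import importlib.util
from typing import TYPE_CHECKING as _TYPE_CHECKING

def _optional_symbol(module_name: str, symbol: str):
    if importlib.util.find_spec(module_name) is None:
        return None  # dependency missing: expose nothing instead of raising
    module = importlib.import_module(module_name)
    return getattr(module, symbol)

if _TYPE_CHECKING:
    pass  # e.g. `from .tokenization_x import XTokenizer` for type checkers only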
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( _a , _a ):
@register_to_config
def __init__( self: Optional[Any] ,__lowerCAmelCase: bool ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : List[str] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_lowerCamelCase : List[Any] = torch.zeros(__lowerCAmelCase ,__lowerCAmelCase )
else:
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Union[str, Any] = torch.nn.Parameter(__lowerCAmelCase )
class A_ ( _a ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self: Tuple ,__lowerCAmelCase: VQModel ,__lowerCAmelCase: CLIPTextModel ,__lowerCAmelCase: CLIPTokenizer ,__lowerCAmelCase: TransformeraDModel ,__lowerCAmelCase: VQDiffusionScheduler ,__lowerCAmelCase: LearnedClassifierFreeSamplingEmbeddings ,):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=__lowerCAmelCase ,transformer=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,learned_classifier_free_sampling_embeddings=__lowerCAmelCase ,)
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = len(__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else 1
# get prompt text embeddings
_lowerCamelCase : Dict = self.tokenizer(
__lowerCAmelCase ,padding="max_length" ,max_length=self.tokenizer.model_max_length ,return_tensors="pt" ,)
_lowerCamelCase : Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCamelCase : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_lowerCamelCase : Any = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCamelCase : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_lowerCamelCase : int = prompt_embeds / prompt_embeds.norm(dim=-1 ,keepdim=__lowerCAmelCase )
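# Illustration (comment only): with prompt_embeds of shape (batch, seq_len, dim),
# the line above divides each token vector by its own L2 norm over `dim`, i.e.
# prompt_embeds[b, t] <- prompt_embeds[b, t] / ||prompt_embeds[b, t]||_2, so the
# transformer is conditioned on unit-norm CLIP hidden states.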
# duplicate text embeddings for each generation per prompt
_lowerCamelCase : Union[str, Any] = prompt_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_lowerCamelCase : Optional[int] = self.learned_classifier_free_sampling_embeddings.embeddings
_lowerCamelCase : int = negative_prompt_embeds.unsqueeze(0 ).repeat(__lowerCAmelCase ,1 ,1 )
else:
_lowerCamelCase : str = [""] * batch_size
_lowerCamelCase : Optional[Any] = text_input_ids.shape[-1]
_lowerCamelCase : Dict = self.tokenizer(
__lowerCAmelCase ,padding="max_length" ,max_length=__lowerCAmelCase ,truncation=__lowerCAmelCase ,return_tensors="pt" ,)
_lowerCamelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_lowerCamelCase : Union[str, Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 ,keepdim=__lowerCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCamelCase : Any = negative_prompt_embeds.shape[1]
_lowerCamelCase : int = negative_prompt_embeds.repeat(1 ,__lowerCAmelCase ,1 )
_lowerCamelCase : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,__lowerCAmelCase ,-1 )
# For classifier-free guidance we would otherwise need two forward passes.
# Here we concatenate the unconditional and text embeddings into a single
# batch so that one forward pass covers both.
_lowerCamelCase : int = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple ,__lowerCAmelCase: Union[str, List[str]] ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 5.0 ,__lowerCAmelCase: float = 1.0 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,__lowerCAmelCase: int = 1 ,):
'''simple docstring'''
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = 1
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = len(__lowerCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Dict = batch_size * num_images_per_prompt
_lowerCamelCase : Tuple = guidance_scale > 1.0
_lowerCamelCase : Optional[Any] = self._encode_prompt(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__lowerCAmelCase )}.""" )
# get the initial completely masked latents unless the user supplied it
_lowerCamelCase : Any = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_lowerCamelCase : int = self.transformer.num_vector_embeds - 1
_lowerCamelCase : List[Any] = torch.full(__lowerCAmelCase ,__lowerCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
_lowerCamelCase : Union[str, Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__lowerCAmelCase ,device=self.device )
_lowerCamelCase : List[Any] = self.scheduler.timesteps.to(self.device )
_lowerCamelCase : List[Any] = latents
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_lowerCamelCase : List[Any] = self.transformer(__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,timestep=__lowerCAmelCase ).sample
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Optional[int] = model_output.chunk(2 )
_lowerCamelCase : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__lowerCAmelCase ,dim=1 ,keepdim=__lowerCAmelCase )
_lowerCamelCase : Dict = self.truncate(__lowerCAmelCase ,__lowerCAmelCase )
# remove `log(0)`'s (`-inf`s)
_lowerCamelCase : Any = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Dict = self.scheduler.step(__lowerCAmelCase ,timestep=__lowerCAmelCase ,sample=__lowerCAmelCase ,generator=__lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.vqvae.config.vq_embed_dim
_lowerCamelCase : Any = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_lowerCamelCase : List[Any] = self.vqvae.quantize.get_codebook_entry(__lowerCAmelCase ,shape=__lowerCAmelCase )
_lowerCamelCase : Any = self.vqvae.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase ).sample
_lowerCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 ,1 )
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
_lowerCamelCase : Dict = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
def _lowercase ( self: Tuple ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: float ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Tuple = torch.sort(__lowerCAmelCase ,1 ,descending=__lowerCAmelCase )
_lowerCamelCase : int = torch.exp(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_lowerCamelCase : Tuple = torch.full_like(keep_mask[:, 0:1, :] ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.cat((all_true, keep_mask) ,dim=1 )
_lowerCamelCase : List[str] = keep_mask[:, :-1, :]
_lowerCamelCase : Optional[Any] = keep_mask.gather(1 ,indices.argsort(1 ) )
_lowerCamelCase : List[str] = log_p_x_0.clone()
_lowerCamelCase : Optional[Any] = -torch.inf # -inf = log(0)
return rv
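# A self-contained toy illustration of the truncation scheme implemented above
# (not used by the pipeline): per latent pixel, classes are kept while the
# cumulative mass of the classes ranked above them is still below
# `truncation_rate`; the rest are forced to log(0) = -inf, and the single most
# likely class is always kept.
def _truncate_demo():
    import torch

    log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))  # (batch, classes, pixels)
    sorted_p, indices = torch.sort(log_p.exp(), 1, descending=True)
    keep = sorted_p.cumsum(dim=1) < 0.5  # truncation_rate = 0.5
    keep = torch.cat((torch.ones_like(keep[:, 0:1, :]), keep), dim=1)[:, :-1, :]
    keep = keep.gather(1, indices.argsort(1))  # back to original class order
    truncated = log_p.clone()
    truncated[~keep] = -torch.inf
    return truncated  # only the 0.6 class survives at truncation_rate 0.5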
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_lowerCamelCase : Tuple = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : List[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :]
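# A toy, self-contained illustration of the q/k/v split performed above (not
# invoked by the conversion): timm stores a single fused (3*hidden, hidden)
# input projection, which is sliced into three equal (hidden, hidden) blocks
# for query, key and value.
def _qkv_split_demo(hidden_size: int = 4):
    import torch

    in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
    in_proj_weight = in_proj_weight.reshape(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : 2 * hidden_size, :]
    value = in_proj_weight[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value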
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : List[Any] = dct.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : int = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_lowerCamelCase )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : str = False
if "vqa" in checkpoint_url:
_lowerCamelCase : str = True
_lowerCamelCase : Union[str, Any] = 3129
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = "vqa2-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Any = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[int] = idalabel
_lowerCamelCase : int = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Any = ViltForQuestionAnswering(_lowerCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[Any] = {0: "False", 1: "True"}
_lowerCamelCase : int = {v: k for k, v in config.idalabel.items()}
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Optional[Any] = ViltForImagesAndTextClassification(_lowerCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : Union[str, Any] = ViltForImageAndTextRetrieval(_lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[int] = ViltForMaskedLM(_lowerCamelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["state_dict"]
_lowerCamelCase : str = create_rename_keys(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
if mlm_model or irtr_model:
_lowerCamelCase : Dict = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowerCamelCase )
# Define processor
_lowerCamelCase : int = ViltImageProcessor(size=384 )
_lowerCamelCase : Union[str, Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Optional[int] = ViltProcessor(_lowerCamelCase , _lowerCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : Union[str, Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : str = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_lowerCamelCase : List[str] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Optional[int] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : int = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_b.pixel_values , )
else:
_lowerCamelCase : str = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=_lowerCamelCase ).raw )
if mlm_model:
_lowerCamelCase : Any = "a bunch of [MASK] laying on a [MASK]."
else:
_lowerCamelCase : List[str] = "How many cats are there?"
_lowerCamelCase : Union[str, Any] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase : List[str] = torch.Size([1, 11, 30522] )
_lowerCamelCase : Dict = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase : List[str] = torch.Size([1, 3129] )
_lowerCamelCase : List[str] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowerCamelCase : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase : List[str] = torch.Size([1, 2] )
_lowerCamelCase : Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase : List[Any] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str | Literal[False]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Any = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != listb[i]:
count += 1
_lowerCamelCase : List[str] = "_"
if count > 1:
return False
else:
return "".join(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
while True:
_lowerCamelCase : Tuple = ["$"] * len(_lowerCamelCase )
_lowerCamelCase : str = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
_lowerCamelCase : Dict = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCamelCase : Any = "*"
_lowerCamelCase : Optional[int] = "*"
temp.append("X" )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
_lowerCamelCase : List[Any] = list(set(_lowerCamelCase ) )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
for minterm in minterms:
_lowerCamelCase : List[Any] = ""
for _ in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Optional[int] = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != listb[i]:
count_n += 1
return count_n == count
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Any = j
if count == 1:
_lowerCamelCase : Union[str, Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : Any = count_n
_lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Any = 0
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : str = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = 1
return chart
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : str = [
float(_lowerCamelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Tuple = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
_lowerCamelCase : Any = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
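# A self-contained sketch of one Quine-McCluskey merge round on a toy input
# (illustrative only; it re-implements the single-bit merge instead of calling
# the helpers above): minterms {0, 1, 2} over two variables merge into the
# implicants "0_" and "_0".
def _qm_toy_round():
    binary = ["00", "01", "10"]  # minterms 0, 1, 2 as 2-bit strings
    merged = set()
    for i in range(len(binary)):
        for j in range(i + 1, len(binary)):
            diff = [k for k in range(len(binary[i])) if binary[i][k] != binary[j][k]]
            if len(diff) == 1:  # adjacent terms: blank out the differing bit
                k = diff[0]
                merged.add(binary[i][:k] + "_" + binary[i][k + 1 :])
    return sorted(merged)

assert _qm_toy_round() == ["0_", "_0"]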
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
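# Worked example (comment only): with scale_factor=8, a requested 300px side
# gives 300 // 64 = 4 with a remainder, so it is rounded up and the function
# returns 5 * 8 = 40 latent pixels (decoded back up by the same factor, a
# 320px side); a 768px side divides evenly into 12 * 8 = 96 latent pixels.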
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=512 , _lowerCamelCase=512 ) -> int:
'''simple docstring'''
_lowerCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("RGB" ) )
_lowerCamelCase : Any = arr.astype(np.floataa ) / 1_2_7.5 - 1
_lowerCamelCase : Optional[Any] = np.transpose(_lowerCamelCase , [2, 0, 1] )
_lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
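# Worked example (comment only): with num_inference_steps=100 and strength=0.3,
# init_timestep = 30 and t_start = 70, so only the last 30 scheduler timesteps
# run; a higher strength re-noises more of the input image and re-generates it
# more aggressively.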
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 46 |
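The denoising loop above doubles the latent batch, splits the UNet output into unconditional and image-conditioned halves, and recombines them with the guidance scale. A minimal sketch of that recombination step, assuming plain torch tensors (the function name is illustrative, not part of the pipeline's API):

import torch

def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred holds the unconditional and conditional predictions stacked
    # along the batch dimension, matching torch.cat([latents] * 2) above.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # Push the prediction away from the unconditional direction and toward the
    # image-conditioned one; guidance_scale > 1 strengthens the conditioning.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

guided = apply_classifier_free_guidance(torch.randn(4, 4, 64, 64), guidance_scale=4.0)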
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
'''simple docstring'''
_lowerCamelCase : Any = value
_lowerCamelCase : Optional[int] = random()
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCamelCase, _lowerCamelCase : int = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCamelCase, _lowerCamelCase : Optional[int] = split(root.right , _lowerCamelCase )
return root, right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCamelCase : Any = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCamelCase : Optional[Any] = merge(_lowerCamelCase , right.left )
return right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase : int = Node(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Tuple = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , value - 1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCamelCase : Optional[Any] = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCamelCase : Optional[Any] = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : List[Any] = None
print(
"enter numbers to create a tree, +value to add a value to the treap, "
"-value to erase all nodes with that value. 'q' to quit. " )
_lowerCamelCase : int = input()
while args != "q":
_lowerCamelCase : List[str] = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCamelCase : Tuple = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
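A short usage sketch for the treap above. The insert, erase, and inorder names are taken from the call sites inside the interact loop; both insert and erase return the new root, so the result must be reassigned:

root = None
for value in [5, 3, 8, 1]:
    root = insert(root, value)  # split at value, then merge(merge(left, node), right)
inorder(root)                   # prints 1,3,5,8,
root = erase(root, 3)           # splits out the run of 3s and merges the rest
inorder(root)                   # prints 1,5,8,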
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list:
'''simple docstring'''
_lowerCamelCase : List[str] = len(_lowerCamelCase )
_lowerCamelCase : Any = [[0] * n for i in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
_lowerCamelCase : Tuple = y_points[i]
for i in range(2 , _lowerCamelCase ):
for j in range(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
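A usage sketch for the Neville scheme above, assuming the original name neville_interpolate(x_points, y_points, x0) behind the renamed definition. Neville's method reproduces any polynomial through its nodes exactly, so points sampled from the line y = x + 5 interpolate back onto that line:

# The second return value is the full Neville table q, kept for inspection.
value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
print(value)  # 10.0 -- the line y = x + 5 evaluated at x = 5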
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 | 1 |
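The tokenization test above works at the character level: every word contributes the SentencePiece boundary marker followed by its individual characters. A self-contained sketch of that behaviour (a simplification of the real SentencePiece model, which additionally maps out-of-vocabulary strings such as "92000" to <unk>):

SPIECE_UNDERLINE = "▁"  # SentencePiece word-boundary marker

def char_tokenize(text: str) -> list[str]:
    # Mirror the fixture: boundary marker, then the word's characters.
    tokens: list[str] = []
    for word in text.split():
        tokens.append(SPIECE_UNDERLINE)
        tokens.extend(word)
    return tokens

assert char_tokenize("This is a test") == [
    "▁", "T", "h", "i", "s", "▁", "i", "s", "▁", "a", "▁", "t", "e", "s", "t"
]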
"""simple docstring"""
from __future__ import annotations
from typing import Any
class A_ :
def __init__( self: Tuple ,__lowerCAmelCase: int = 6 ):
'''simple docstring'''
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
self.create_linked_list(__lowerCAmelCase )
def _lowercase ( self: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = Node()
_lowerCamelCase : Optional[int] = current_node
_lowerCamelCase : Any = current_node
_lowerCamelCase : int = current_node
for _ in range(1 ,__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = Node()
_lowerCamelCase : Any = current_node
_lowerCamelCase : Tuple = previous_node
_lowerCamelCase : Optional[Any] = current_node
_lowerCamelCase : Union[str, Any] = self.front
_lowerCamelCase : Any = previous_node
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _lowercase ( self: Any ):
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Any ):
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
_lowerCamelCase : List[str] = self.rear.next
if self.rear:
_lowerCamelCase : List[str] = data
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
_lowerCamelCase : Any = self.front.data
_lowerCamelCase : Dict = None
return data
_lowerCamelCase : List[Any] = self.front
_lowerCamelCase : Optional[Any] = old_front.next
_lowerCamelCase : str = old_front.data
_lowerCamelCase : List[str] = None
return data
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self.is_empty():
raise Exception("Empty Queue" )
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue" )
class A_ :
def __init__( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Any | None = None
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
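A usage sketch for the linked-list circular queue above. Every method in the dump is renamed _lowercase, so the class and method names below (CircularQueueLinkedList, enqueue, dequeue) are assumptions recovered from the behaviour:

queue = CircularQueueLinkedList(3)  # pre-allocates a ring of 3 linked nodes
queue.enqueue("a")
queue.enqueue("b")
print(queue.dequeue())  # a -- the front pointer advances and the node is reused
print(queue.dequeue())  # b
# A further dequeue on the now-empty ring raises Exception("Empty Queue").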
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 46 | 1 |
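The module above repeats one guard per backend: probe for the dependency, raise OptionalDependencyNotAvailable if it is missing, and import dummy placeholders on failure so the package itself stays importable. A sketch of the same pattern from user code, assuming the current diffusers public API:

from diffusers.utils import OptionalDependencyNotAvailable, is_torch_available

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # The real module star-imports placeholder classes from
    # utils.dummy_pt_objects that raise a clear error only when instantiated.
    DDIMScheduler = None
else:
    from diffusers.schedulers import DDIMScheduler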
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
class A_ ( _a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self: str ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Union[int, float] = 1 / 255 ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN ,__lowerCAmelCase: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : Dict = size if size is not None else {"shortest_edge": 224}
_lowerCamelCase : Any = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowerCamelCase : Any = get_size_dict(__lowerCAmelCase ,param_name="crop_size" )
_lowerCamelCase : List[Any] = do_resize
_lowerCamelCase : Union[str, Any] = size
_lowerCamelCase : List[str] = resample
_lowerCamelCase : List[Any] = do_center_crop
_lowerCamelCase : Union[str, Any] = crop_size
_lowerCamelCase : Tuple = do_rescale
_lowerCamelCase : Dict = rescale_factor
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCamelCase : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowercase ( self: List[str] ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Dict[str, int] ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : str = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCamelCase : Optional[Any] = int((256 / 224) * size["shortest_edge"] )
_lowerCamelCase : Union[str, Any] = get_resize_output_image_size(__lowerCAmelCase ,size=__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
__lowerCAmelCase ,size=(size_dict["height"], size_dict["width"]) ,resample=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: List[str] ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Dict[str, int] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: Tuple ,):
'''simple docstring'''
_lowerCamelCase : str = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__lowerCAmelCase ,size=(size["height"], size["width"]) ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[int, float] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: str ,):
'''simple docstring'''
return rescale(__lowerCAmelCase ,scale=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: str ,):
'''simple docstring'''
return normalize(__lowerCAmelCase ,mean=__lowerCAmelCase ,std=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: ImageInput ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[Dict[str, int]] = None ,__lowerCAmelCase: PILImageResampling = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[Dict[str, int]] = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[float] = None ,__lowerCAmelCase: Optional[bool] = None ,__lowerCAmelCase: Optional[Union[float, Iterable[float]]] = None ,__lowerCAmelCase: Optional[Union[float, Iterable[float]]] = None ,__lowerCAmelCase: Optional[TensorType] = None ,__lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
_lowerCamelCase : Dict = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : Dict = resample if resample is not None else self.resample
_lowerCamelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Optional[int] = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : List[str] = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Optional[int] = get_size_dict(__lowerCAmelCase ,param_name="crop_size" )
_lowerCamelCase : Any = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowerCamelCase : List[Any] = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
_lowerCamelCase : List[str] = [self.resize(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for image in images]
if do_center_crop:
_lowerCamelCase : Optional[Any] = [self.center_crop(__lowerCAmelCase ,__lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCamelCase : Union[str, Any] = [self.rescale(__lowerCAmelCase ,__lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCamelCase : Any = [self.normalize(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for image in images]
_lowerCamelCase : List[Any] = [to_channel_dimension_format(__lowerCAmelCase ,__lowerCAmelCase ) for image in images]
_lowerCamelCase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__lowerCAmelCase ,tensor_type=__lowerCAmelCase )
| 46 |
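The resize step above follows the classic "resize to 256, crop to 224" recipe: given a shortest-edge target, it scales it by 256/224 before resizing so the subsequent center crop has a margin. A sketch of just the size arithmetic:

def resized_shortest_edge(shortest_edge: int = 224) -> int:
    # Resize so the short side becomes (256/224) * shortest_edge, leaving room
    # for the later center crop of shortest_edge x shortest_edge pixels.
    return int((256 / 224) * shortest_edge)

assert resized_shortest_edge(224) == 256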
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 46 | 1 |
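The _get_variance(t, prev_t) values asserted above follow the standard DDIM posterior variance, sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev). A sketch that reproduces them from the test's linear beta schedule (the function name is illustrative):

import torch

betas = torch.linspace(0.0001, 0.02, 1000)          # matches the test config
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def ddim_variance(t: int, prev_t: int) -> torch.Tensor:
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - alpha_prod_prev) / (1 - alpha_prod_t) * (1 - alpha_prod_t / alpha_prod_prev)

print(ddim_variance(420, 400))  # ~0.1477, matching the assertion above
print(ddim_variance(999, 998))  # ~0.02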
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
def __init__( self: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: List[Any]=13 ,__lowerCAmelCase: str=32 ,__lowerCAmelCase: Any=2 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: str=16 ,__lowerCAmelCase: Any=[1, 2, 1] ,__lowerCAmelCase: List[Any]=[2, 2, 4] ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: List[str]=2.0 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Union[str, Any]=0.0 ,__lowerCAmelCase: Dict=0.0 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Tuple="gelu" ,__lowerCAmelCase: List[str]=False ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-5 ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: int=None ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: List[Any]=10 ,__lowerCAmelCase: int=8 ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : List[Any] = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : Optional[int] = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : Union[str, Any] = mlp_ratio
_lowerCamelCase : Tuple = qkv_bias
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Optional[int] = use_absolute_embeddings
_lowerCamelCase : Optional[int] = patch_norm
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : int = scope
_lowerCamelCase : Any = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Dict = encoder_stride
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Tuple ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _lowercase ( self: Dict ,__lowerCAmelCase: Any ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = SwinvaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
_lowerCamelCase : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = SwinvaForMaskedImageModeling(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Optional[Any] = SwinvaForMaskedImageModeling(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.type_sequence_label_size
_lowerCamelCase : Tuple = SwinvaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase__ = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = SwinvaModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,embed_dim=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : List[str] = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = True
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Tuple = False
_lowerCamelCase : int = True
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : int = outputs.attentions
_lowerCamelCase : int = len(self.model_tester.depths )
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCamelCase : int = True
_lowerCamelCase : Any = config.window_size**2
_lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Dict = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
# Check attention is always last and order is fine
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
if hasattr(self.model_tester ,"num_hidden_states_types" ):
_lowerCamelCase : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_lowerCamelCase : Union[str, Any] = 2
self.assertEqual(out_len + added_hidden_states ,len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = outputs.attentions
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def _lowercase ( self: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : int = outputs.hidden_states
_lowerCamelCase : Optional[Any] = getattr(
self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
# Swinv2 has a different seq_length
_lowerCamelCase : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
_lowerCamelCase : int = outputs.reshaped_hidden_states
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : Tuple = (
reshaped_hidden_states[0].view(__lowerCAmelCase ,__lowerCAmelCase ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Union[str, Any] = 3
_lowerCamelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Tuple = True
self.check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,(padded_height, padded_width) )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = SwinvaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = _config_zero_init(__lowerCAmelCase )
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(config=__lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.default_image_processor
_lowerCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : str = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase, _lowerCamelCase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
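# Example with the defaults above (depths=[3, 4, 6, 3]): stage_names becomes
# ["stem", "stage1", "stage2", "stage3", "stage4"]; when neither out_features nor
# out_indices is passed, the helper is expected to fall back to the last stage only.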
| 46 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A_ ( _a ):
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCAmelCase ,"hidden_sizes" ) )
self.parent.assertTrue(hasattr(__lowerCAmelCase ,"num_attention_heads" ) )
class A_ :
def __init__( self: str ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Dict=13 ,__lowerCAmelCase: Optional[int]=64 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Optional[Any]=16 ,__lowerCAmelCase: Tuple=[128, 256, 384] ,__lowerCAmelCase: Dict=[4, 6, 8] ,__lowerCAmelCase: Tuple=[2, 3, 4] ,__lowerCAmelCase: List[Any]=[16, 16, 16] ,__lowerCAmelCase: Dict=0 ,__lowerCAmelCase: List[Any]=[2, 2, 2] ,__lowerCAmelCase: List[Any]=[2, 2, 2] ,__lowerCAmelCase: List[Any]=0.02 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: int=True ,__lowerCAmelCase: Any=2 ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : List[Any] = batch_size
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : Any = kernel_size
_lowerCamelCase : List[str] = stride
_lowerCamelCase : Optional[int] = padding
_lowerCamelCase : List[Any] = hidden_sizes
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : Optional[Any] = depths
_lowerCamelCase : int = key_dim
_lowerCamelCase : List[Any] = drop_path_rate
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : Optional[int] = attention_ratio
_lowerCamelCase : Any = mlp_ratio
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : Optional[Any] = num_labels
_lowerCamelCase : Any = initializer_range
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Tuple = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: List[Any] ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,)
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = LevitModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase )
_lowerCamelCase : Tuple = (self.image_size, self.image_size)
_lowerCamelCase, _lowerCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
_lowerCamelCase : Tuple = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_lowerCamelCase : str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,)
def _lowercase ( self: Any ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : int = LevitForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = LevitModelTester(self )
_lowerCamelCase : str = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self: str ):
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCamelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
def check_hidden_states_output(__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ):
_lowerCamelCase : int = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : List[str] = outputs.hidden_states
_lowerCamelCase : List[str] = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
_lowerCamelCase : List[str] = (self.model_tester.image_size, self.model_tester.image_size)
_lowerCamelCase, _lowerCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
_lowerCamelCase : List[str] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_lowerCamelCase : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
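# Worked example, assuming the tester defaults above (image_size=64, kernel_size=3,
# stride=2, padding=1): each pass computes floor((H + 2 - 3) / 2) + 1, so the spatial
# size goes 64 -> 32 -> 16 -> 8 -> 4 and the first hidden state has 4 * 4 = 16 tokens.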
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[
height * width,
self.model_tester.hidden_sizes[0],
] ,)
_lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = super()._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_lowerCamelCase : str = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCamelCase : Dict = False
_lowerCamelCase : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_lowerCamelCase : int = model_class(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Tuple = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
_lowerCamelCase : Tuple = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : int = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
_lowerCamelCase : Dict = problem_type["title"]
_lowerCamelCase : Tuple = problem_type["num_labels"]
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Dict = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
if problem_type["num_labels"] > 1:
_lowerCamelCase : Tuple = inputs["labels"].unsqueeze(1 ).repeat(1 ,problem_type["num_labels"] )
_lowerCamelCase : List[str] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list:
_lowerCamelCase : Dict = model(**__lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = LevitModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : str = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Any = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = num_frames
_lowerCamelCase : Optional[int] = tubelet_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = qkv_bias
super().__init__(**__lowerCAmelCase )
| 46 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = tmp_path / "cache"
_lowerCamelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : Dict = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = tmp_path / "cache"
_lowerCamelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features
_lowerCamelCase : Optional[Any] = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : Any = ParquetDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path / "cache"
_lowerCamelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCamelCase : List[str] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
if issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Tuple = parquet_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Any = [parquet_path]
_lowerCamelCase : Any = tmp_path / "cache"
_lowerCamelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCamelCase : Union[str, Any] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ) -> Dict:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
_lowerCamelCase : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path / "cache"
_lowerCamelCase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : Any = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCamelCase : Union[str, Any] = features.copy() if features else default_expected_features
_lowerCamelCase : str = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : str = ParquetDatasetReader({"train": parquet_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
if split:
_lowerCamelCase : Union[str, Any] = {split: parquet_path}
else:
_lowerCamelCase : Optional[Any] = "train"
_lowerCamelCase : Optional[Any] = {"train": parquet_path, "test": parquet_path}
_lowerCamelCase : Optional[int] = tmp_path / "cache"
_lowerCamelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCamelCase : Tuple = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : Dict = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_lowerCamelCase : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
_lowerCamelCase : List[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : Any = str(shared_datadir / "test_image_rgb.jpg" )
_lowerCamelCase : Optional[Any] = {"image": [image_path]}
_lowerCamelCase : List[str] = Features({"image": Image()} )
_lowerCamelCase : List[str] = Dataset.from_dict(_lowerCamelCase , features=_lowerCamelCase )
_lowerCamelCase : List[Any] = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_lowerCamelCase : List[Any] = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
_lowerCamelCase : List[str] = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
assert get_writer_batch_size(_lowerCamelCase ) == expected
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
| 46 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = ""
for i in table:
res += inp[i - 1]
return res
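# Illustrative call (the table is a 1-indexed permutation):
# apply_table("abcd", [2, 1, 4, 3]) returns "badc".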
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
return data[1:] + data[0]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ""
for i in range(len(_lowerCamelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = int("0b" + data[0] + data[-1] , 2 )
_lowerCamelCase : int = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
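# Illustrative call: for data "1010", row = int("10", 2) = 2 (outer bits) and
# col = int("01", 2) = 1 (middle bits), so the S-box lookup is s[2][1].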
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : List[Any] = message[:4]
_lowerCamelCase : str = message[4:]
_lowerCamelCase : Any = apply_table(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = xor(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[Any] = apply_sbox(_lowerCamelCase , temp[:4] ) # noqa: E741
_lowerCamelCase : Any = apply_sbox(_lowerCamelCase , temp[4:] )
_lowerCamelCase : Optional[Any] = "0" * (2 - len(_lowerCamelCase )) + l # noqa: E741
_lowerCamelCase : str = "0" * (2 - len(_lowerCamelCase )) + r
_lowerCamelCase : str = apply_table(l + r , _lowerCamelCase )
_lowerCamelCase : Optional[Any] = xor(_lowerCamelCase , _lowerCamelCase )
return temp + right
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = input('''Enter 10 bit key: ''')
_lowerCAmelCase : Tuple = input('''Enter 8 bit message: ''')
_lowerCAmelCase : Tuple = [6, 3, 7, 4, 8, 5, 10, 9]
_lowerCAmelCase : Optional[int] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_lowerCAmelCase : Tuple = [2, 4, 3, 1]
_lowerCAmelCase : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
_lowerCAmelCase : int = [4, 1, 3, 5, 7, 2, 8, 6]
_lowerCAmelCase : int = [4, 1, 2, 3, 2, 3, 4, 1]
_lowerCAmelCase : str = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_lowerCAmelCase : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_lowerCAmelCase : int = apply_table(key, paa_table)
_lowerCAmelCase : int = temp[:5]
_lowerCAmelCase : Dict = temp[5:]
_lowerCAmelCase : int = left_shift(left)
_lowerCAmelCase : List[Any] = left_shift(right)
_lowerCAmelCase : List[Any] = apply_table(left + right, pa_table)
_lowerCAmelCase : Tuple = left_shift(left)
_lowerCAmelCase : Union[str, Any] = left_shift(right)
_lowerCAmelCase : Any = left_shift(left)
_lowerCAmelCase : List[Any] = left_shift(right)
_lowerCAmelCase : Optional[Any] = apply_table(left + right, pa_table)
# encryption
_lowerCAmelCase : Any = apply_table(message, IP)
_lowerCAmelCase : Optional[int] = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : Optional[int] = temp[4:] + temp[:4]
_lowerCAmelCase : Tuple = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : str = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_lowerCAmelCase : Tuple = apply_table(CT, IP)
_lowerCAmelCase : str = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : Any = temp[4:] + temp[:4]
_lowerCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
_lowerCAmelCase : int = apply_table(temp, IP_inv)
print('''Plain text after decrypting is:''', PT)
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
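# Rough check with an assumed scale_factor of 8 (typical for the MoVQ used here):
# (768, 768) maps to (96, 96), and a non-multiple such as (765, 765) is rounded up
# to the same (96, 96).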
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=512 , _lowerCamelCase=512 ) -> int:
'''simple docstring'''
_lowerCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("RGB" ) )
_lowerCamelCase : Any = arr.astype(np.floataa ) / 1_2_7.5 - 1
_lowerCamelCase : Optional[Any] = np.transpose(_lowerCamelCase , [2, 0, 1] )
_lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
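# Illustrative numbers: with num_inference_steps=100 and strength=0.2, init_timestep is 20
# and t_start is 80, so only the final 20 scheduler timesteps are applied to the init image.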
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 46 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : List[Any] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
_lowerCAmelCase : Dict = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
_lowerCAmelCase : Union[str, Any] = '''▁'''
class A_ ( _a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BarthezTokenizer
def __init__( self: List[Any] ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Any="<s>" ,__lowerCAmelCase: List[str]="</s>" ,__lowerCAmelCase: int="</s>" ,__lowerCAmelCase: List[Any]="<s>" ,__lowerCAmelCase: List[Any]="<unk>" ,__lowerCAmelCase: List[Any]="<pad>" ,__lowerCAmelCase: Tuple="<mask>" ,**__lowerCAmelCase: Tuple ,):
'''simple docstring'''
_lowerCamelCase : int = AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else mask_token
super().__init__(
__lowerCAmelCase ,tokenizer_file=__lowerCAmelCase ,bos_token=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Optional[int] = vocab_file
_lowerCamelCase : Union[str, Any] = False if not self.vocab_file else True
def _lowercase ( self: Dict ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
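# For a sentence pair this mirrors the RoBERTa pair layout <s> A </s></s> B </s>;
# a single sentence becomes <s> A </s>.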
def _lowercase ( self: Any ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ):
'''simple docstring'''
_lowerCamelCase : str = [self.sep_token_id]
_lowerCamelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase : List[str] = os.path.join(
__lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.vocab_file ,__lowerCAmelCase )
return (out_vocab_file,)
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCamelCase_( ) -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
'''simple docstring'''
print("Generating prime p..." )
_lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
_lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCamelCase : Dict = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCamelCase : Dict = (n, e)
_lowerCamelCase : Dict = (n, d)
return (public_key, private_key)
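# Standard RSA background (not specific to this script): d satisfies
# e * d = 1 (mod (p - 1) * (q - 1)), so for any message m < n we get
# pow(pow(m, e, n), d, n) == m, i.e. encryption with e is undone by decryption with d.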
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None:
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
_lowerCamelCase, _lowerCamelCase = generate_key(_lowerCamelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_lowerCAmelCase : Union[str, Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
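# With the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
# num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91 kept tokens.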
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        '''simple docstring'''
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    '''simple docstring'''
    im = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return im
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        '''simple docstring'''
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 46 | 1 |
"""simple docstring"""
from math import isqrt, log10


def calculate_prime_numbers(max_number: int) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    '''simple docstring'''
    upper_bound = degree * log10(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log10(prime_numbers[left])
            + prime_numbers[left] * log10(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
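# A brute-force cross-check (an illustrative addition, not part of the original
# solution; `naive_solution` is a name chosen for this sketch). It counts the
# same prime pairs p < q with p**q * q**p <= base**degree directly:
def naive_solution(base: int = 10, degree: int = 10) -> int:
    upper_bound = degree * log10(base)
    primes = calculate_prime_numbers(int(upper_bound))
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i + 1 :]:
            if q * log10(p) + p * log10(q) <= upper_bound:
                count += 1
    return count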
if __name__ == "__main__":
print(f'''{solution() = }''')
| 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    '''simple docstring'''
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
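# A small usage sketch (an illustrative addition, not part of the original
# script); both variants return the index of `target` in a sorted list, or -1:
def _demo_search() -> None:
    collection = [2, 4, 8, 16, 32, 64]
    assert ite_ternary_search(collection, 16) == 3
    assert rec_ternary_search(0, len(collection) - 1, collection, 16) == 3
    assert ite_ternary_search(collection, 5) == -1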
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f'''Iterative search: {target} found at position: {result1}''')
        print(f'''Recursive search: {target} found at position: {result2}''')
    else:
        print('''Not found''')
else:
print('''Not found''')
| 46 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['''gpt2''']
TINY_MODEL_CHECKPOINT = '''gpt2'''
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text_input):
            '''simple docstring'''
            tokenized = self.tokenizer(text_input)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
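        # Note (added commentary): `to_tensor()` densifies the tokenizer's ragged
        # output with zero padding, so `input_ids_dense > 0` recovers an attention
        # mask that flags every non-padding position.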
@require_tf
@require_keras_nlp
class A_ ( unittest.TestCase ):
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
def _lowercase ( self: Dict ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_lowerCamelCase : Any = tokenizer([test_inputs] ,return_tensors="tf" )
_lowerCamelCase : Optional[Any] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_lowerCamelCase : Optional[Any] = python_outputs[key].numpy()
_lowerCamelCase : Tuple = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__lowerCAmelCase ,tf.intaa ) == tf_outputs_values ) )
@slow
def _lowercase ( self: str ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def _lowercase ( self: Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 123_123
            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 46 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
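# Sanity check (an illustrative addition, not part of the original solution):
# for n = 5 the distinct values of a**b with 2 <= a, b <= 5 number 15, since
# 2**4 == 4**2 is the only collision among the 16 pairs.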
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 46 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = '''facebook/wmt19-en-de'''
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
mname_tiny = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = 'esm'

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        '''simple docstring'''
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
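    # Worked example of the head-width constraint above (added for illustration):
    # with the defaults sequence_state_dim=1024 and sequence_head_width=32 we get
    # 1024 // 32 = 32 heads, and 32 * 32 == 1024, so __post_init__ passes.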
    def to_dict(self):
        '''simple docstring'''
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        '''simple docstring'''
        return asdict(self)
def get_default_vocab_list():
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
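# Note (added commentary): this treap needs no explicit rotations; `insert`
# splits the tree around the new value and merges the pieces back together,
# while `merge` preserves the heap property by promoting the node with the
# smaller priority.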
def inorder(root: Node | None) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def interact_treap(root: Node | None, args: str) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    '''simple docstring'''
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 |
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    '''simple docstring'''
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
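# Example (added for illustration): dna_complement("AAATCG") == "TTTAGC", while
# a strand containing any other character, e.g. "U", raises
# ValueError("Invalid Strand").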
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 | 1 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
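# Worked example (added for illustration): with first_term=1, common_diff=1 and
# num_of_terms=10 the series is 1 + 2 + ... + 10, and the formula gives
# (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0.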
def main() -> None:
    '''simple docstring'''
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
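# Shape arithmetic (added for illustration): timm stores Q, K and V as one fused
# (3 * hidden_size, hidden_size) projection; with hidden_size = 768 the slices
# above carve it into rows 0:768 (query), 768:1536 (key) and 1536:2304 (value).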
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : List[Any] = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
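# Note (added commentary): this mapping feeds the `_LazyModule` instantiated at
# the bottom of the file; each key names a submodule and each value lists the
# symbols re-exported from it, so heavy imports are deferred until first use.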
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    '''simple docstring'''
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
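# Worked example (added for illustration): sigmoid(0.0) == 0.5, and
# softmax(np.array([[0.0, 1.0]])) is approximately [[0.2689, 0.7311]].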
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs):
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        '''simple docstring'''
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    def test_top_k_top_p_filtering(self):
        '''simple docstring'''
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
            ],  # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
            ],  # cumulative prob of 5 highest values <= 0.6
        ], dtype=tf.float32, )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32, )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32, )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32, )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowerCAmelCase__ = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                '''simple docstring'''
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ), jit_compile=True, )
            def serving(self, input_ids, attention_mask):
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
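        # Note (added commentary): pinning `input_length` in the TensorSpec fixes
        # the sequence axis of the exported signature while the leading batch axis
        # stays dynamic, which is exactly what the loop over batch sizes verifies.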
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                '''simple docstring'''
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ), jit_compile=True, )
            def serving(self, input_ids, attention_mask):
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    '''simple docstring'''
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read())
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    '''simple docstring'''
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id)
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
def _lowercase ( self: int ):
'''simple docstring'''
generation_kwargs = {
    "do_sample": True,
    "num_beams": 1,
    "top_p": 0.7,
    "top_k": 10,
    "temperature": 0.7,
}
expectation = 14
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
sentence = "Hello, my dog is cute and"
tokens = tokenizer(sentence, return_tensors="tf")
model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
eos_token_id = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0"):
    tf.random.set_seed(0)
    generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
self.assertTrue(expectation == len(generated_tokens[0]))
eos_token_id = [638, 198]
with tf.device(":/CPU:0"):
    tf.random.set_seed(0)
    generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
self.assertTrue(expectation == len(generated_tokens[0]))
def _lowercase ( self: List[str] ):
'''simple docstring'''
bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
article = "Hugging Face is a technology company based in New York and Paris."
input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
output = bart_model.generate(input_ids).numpy()

class FakeBart(TFBartForConditionalGeneration):
    def call(self, input_ids, foo=None, **kwargs):
        '''simple docstring'''
        return super().call(input_ids, **kwargs)

bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
fake_output = bart_model.generate(input_ids, foo="bar").numpy()
self.assertTrue(np.array_equal(output, fake_output))

class FakeEncoder(bart_model.model.encoder.__class__):
    def call(self, input_ids, **kwargs):
        '''simple docstring'''
        return super().call(input_ids, **kwargs)

fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
fake_output = bart_model.generate(input_ids).numpy()
with self.assertRaises(ValueError):
    # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
    bart_model.generate(input_ids, foo="bar")
| 46 |
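# A minimal, self-contained sketch of the SavedModel export pattern the tests
# above exercise. The checkpoint name matches the tiny test model used above;
# `max_new_tokens=8` and the fully dynamic (batch, length) signature are
# illustrative choices rather than values taken from the tests.
import tempfile

import tensorflow as tf
from transformers import TFAutoModelForCausalLM


class GenerateModule(tf.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    @tf.function(
        input_signature=(
            tf.TensorSpec((None, None), tf.int32, name="input_ids"),
            tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
        )
    )
    def serving(self, input_ids, attention_mask):
        # Wrap generate() so the traced function has a stable, servable signature.
        outputs = self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_new_tokens=8,
            return_dict_in_generate=True,
        )
        return {"sequences": outputs["sequences"]}


model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
module = GenerateModule(model)
with tempfile.TemporaryDirectory() as tmp_dir:
    tf.saved_model.save(module, tmp_dir, signatures={"serving_default": module.serving})
    serving_fn = tf.saved_model.load(tmp_dir).signatures["serving_default"]
    sequences = serving_fn(
        input_ids=tf.constant([[2, 0]]), attention_mask=tf.constant([[1, 1]])
    )["sequences"]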
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCAmelCase : Tuple = '''\
Text data.
Second line of data.'''
_lowerCAmelCase : str = '''file'''
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : List[str] = bytes(_lowerCamelCase , "utf-8" )
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , "w" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Tuple = str(Path(_lowerCamelCase ).resolve() )
assert cached_path(_lowerCamelCase ) == text_file
# relative path
_lowerCamelCase : Optional[int] = str(Path(_lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowerCamelCase ) == text_file
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : str = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
# relative path
_lowerCamelCase : List[Any] = "./__missing_file__.txt"
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 | 1 |
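# Hedged usage sketch of the cached_path / DownloadConfig API the tests above
# exercise: a local path is returned as-is, a remote URL is downloaded into the
# cache (and extracted when extract_compressed_file=True). The URL and cache
# directory below are illustrative assumptions.
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

download_config = DownloadConfig(cache_dir="/tmp/hf_datasets_cache", extract_compressed_file=True)
resolved_path = cached_path(
    "https://huggingface.co/datasets/some-user/some-repo/resolve/main/data.txt.gz",
    download_config=download_config,
)
print(resolved_path)  # local path of the downloaded (and extracted) file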
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowerCamelCase_( ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : List[str] = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=_lowerCamelCase )
_lowerCamelCase : Dict = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_lowerCamelCase )
EnvironmentCommand.register_subcommand(_lowerCamelCase )
TestCommand.register_subcommand(_lowerCamelCase )
RunBeamCommand.register_subcommand(_lowerCamelCase )
DummyDataCommand.register_subcommand(_lowerCamelCase )
# Parse args
_lowerCamelCase, _lowerCamelCase : Tuple = parser.parse_known_args()
if not hasattr(_lowerCamelCase , "func" ):
parser.print_help()
exit(1 )
_lowerCamelCase : int = parse_unknown_args(_lowerCamelCase )
# Run
_lowerCamelCase : Optional[int] = args.func(_lowerCamelCase , **_lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
| 46 |
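# A standalone illustration of how the parse-unknown-args helper above pairs
# leftover "--key value" tokens into a dict (the argument names are made up):
unknown_args = ["--name", "squad", "--cache_dir", "/tmp/cache"]
parsed = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
assert parsed == {"name": "squad", "cache_dir": "/tmp/cache"}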
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 46 | 1 |
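# The converter above, restated as a hedged three-line sketch (the checkpoint
# paths are illustrative): load a saved state dict, halve every tensor, and
# write the result back out.
import torch

state_dict = torch.load("pytorch_model.bin", map_location="cpu")
state_dict = {k: v.half() for k, v in state_dict.items()}
torch.save(state_dict, "pytorch_model.fp16.bin")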
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 50 ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 46 |
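# Sanity check for the dynamic programme above (hedged: the __main__ block
# calls the function `solution`, so that name is assumed here). A row of
# length 3 admits two arrangements (leave it empty, or place one length-3
# block), and the classic length-7 case admits seventeen:
assert solution(3) == 2
assert solution(7) == 17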
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCAmelCase : List[str] = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoConfig.for_model("roberta" )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,"fake-roberta" )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase ,"config.json" ) ,"w" ) as f:
f.write(json.dumps({} ) )
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(type(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
try:
AutoConfig.register("custom" ,__lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("model" ,__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoConfig.register("bert" ,__lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"bert-base is not a local folder and is not a valid model identifier" ):
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__lowerCAmelCase ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
_lowerCamelCase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with self.assertRaises(ValueError):
    _lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
    _lowerCamelCase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=False )
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=True )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = AutoConfig.from_pretrained(__lowerCAmelCase ,trust_remote_code=True )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
class NewModelConfigLocal(BertConfig ):
    model_type = 'new-model'
try:
    AutoConfig.register("new-model" ,NewModelConfigLocal )
# If remote code is not set, the default is to use local
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=__lowerCAmelCase )
self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 1 |
"""simple docstring"""
import operator as op
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Union[str, Any] = lambda x, y: int(x / y )  # noqa: E731 integer division operation
_lowerCamelCase : Optional[int] = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(_lowerCamelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_lowerCamelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " )
else:
_lowerCamelCase : Optional[Any] = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " )
_lowerCamelCase : Tuple = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " )
stack.append(
str(opr[x](int(_lowerCamelCase ) , int(_lowerCamelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(_lowerCamelCase ) , sep=" | " , )
return int(stack[0] )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 46 |
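# Worked example for the evaluator above (hedged: the __main__ block calls it
# `solve` and passes an already-split token list). It prints a trace table
# while reducing the stack:
#   5 6 2 + * 12 4 / -  ->  (5 * (6 + 2)) - (12 / 4) = 37
assert solve("5 6 2 + * 12 4 / -".split(" ")) == 37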
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 | 1 |
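# What the lazy structure above enables (hedged: requires sentencepiece, and
# the checkpoint name is an illustrative public GPT-SW3 model): the tokenizer
# class resolves on first attribute access instead of at package import time.
from transformers.models.gpt_sw3 import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")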
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_lowerCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self: Dict ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[float] = None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
if audio_length_in_s is None:
_lowerCamelCase : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
_lowerCamelCase : List[str] = audio_length_in_s * self.unet.config.sample_rate
_lowerCamelCase : List[str] = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
_lowerCamelCase : Optional[Any] = int(__lowerCAmelCase )
if sample_size % down_scale_factor != 0:
_lowerCamelCase : Tuple = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
" process." )
_lowerCamelCase : Union[str, Any] = int(__lowerCAmelCase )
_lowerCamelCase : Tuple = next(iter(self.unet.parameters() ) ).dtype
_lowerCamelCase : Tuple = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_lowerCamelCase : Union[str, Any] = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=self.device ,dtype=__lowerCAmelCase )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase ,device=audio.device )
_lowerCamelCase : str = self.scheduler.timesteps.to(__lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCamelCase : int = self.unet(__lowerCAmelCase ,__lowerCAmelCase ).sample
# 2. compute previous image: x_t -> t_t-1
_lowerCamelCase : Any = self.scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
_lowerCamelCase : str = audio.clamp(-1 ,1 ).float().cpu().numpy()
_lowerCamelCase : Union[str, Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__lowerCAmelCase )
| 46 |
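# Hedged usage sketch for the unconditional audio pipeline above; the
# checkpoint name is an assumption (any Dance Diffusion-style repository with
# a UNet and scheduler would do).
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
audio = output.audios[0]  # numpy array of shape (channels, samples)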
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_lowerCamelCase : Tuple = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : List[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : List[Any] = dct.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
    _lowerCamelCase : int = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : str = False
if "vqa" in checkpoint_url:
_lowerCamelCase : str = True
_lowerCamelCase : Union[str, Any] = 3129
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = "vqa2-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
        _lowerCamelCase : Any = {int(k): v for k, v in id2label.items()}
        _lowerCamelCase : Optional[int] = id2label
        _lowerCamelCase : int = {v: k for k, v in id2label.items()}
_lowerCamelCase : Any = ViltForQuestionAnswering(_lowerCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[Any] = {0: "False", 1: "True"}
        _lowerCamelCase : int = {v: k for k, v in config.id2label.items()}
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Optional[Any] = ViltForImagesAndTextClassification(_lowerCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : Union[str, Any] = ViltForImageAndTextRetrieval(_lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[int] = ViltForMaskedLM(_lowerCamelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["state_dict"]
_lowerCamelCase : str = create_rename_keys(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
if mlm_model or irtr_model:
_lowerCamelCase : Dict = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowerCamelCase )
# Define processor
_lowerCamelCase : int = ViltImageProcessor(size=384 )
_lowerCamelCase : Union[str, Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Optional[int] = ViltProcessor(_lowerCamelCase , _lowerCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : Union[str, Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : str = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_lowerCamelCase : List[str] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Optional[int] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : int = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCamelCase : str = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=_lowerCamelCase ).raw )
if mlm_model:
_lowerCamelCase : Any = "a bunch of [MASK] laying on a [MASK]."
else:
_lowerCamelCase : List[str] = "How many cats are there?"
_lowerCamelCase : Union[str, Any] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase : List[str] = torch.Size([1, 11, 30522] )
        _lowerCamelCase : Dict = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase : List[str] = torch.Size([1, 3129] )
        _lowerCamelCase : List[str] = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowerCamelCase : Union[str, Any] = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase : List[str] = torch.Size([1, 2] )
        _lowerCamelCase : Optional[Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46 | 1 |
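# The rename helpers above reduce to "pop under the old key, reinsert under
# the new one". A tiny standalone illustration of that mechanic:
state_dict = {"transformer.norm.weight": 1.0}
rename_keys = [("transformer.norm.weight", "vilt.layernorm.weight")]
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)
assert state_dict == {"vilt.layernorm.weight": 1.0}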
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_lowerCAmelCase : Optional[Any] = '''examples/'''
_lowerCAmelCase : Optional[Any] = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_lowerCAmelCase : Tuple = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
_lowerCAmelCase : List[str] = '''README.md'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : Tuple = f.read()
_lowerCamelCase, _lowerCamelCase : Dict = REPLACE_PATTERNS[pattern]
_lowerCamelCase : List[Any] = replace.replace("VERSION" , _lowerCamelCase )
_lowerCamelCase : Tuple = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="examples" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> int:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def lowerCamelCase_( ) -> Any:
'''simple docstring'''
_lowerCamelCase : Dict = "🤗 Transformers currently provides the following architectures"
_lowerCamelCase : str = "1. Want to contribute a new model?"
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : Optional[Any] = f.readlines()
# Find the start of the list.
_lowerCamelCase : List[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowerCamelCase : Union[str, Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
_lowerCamelCase : Tuple = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_lowerCamelCase )
def lowerCamelCase_( ) -> Optional[Any]:
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
_lowerCamelCase : Any = f.read()
_lowerCamelCase : Optional[int] = REPLACE_PATTERNS["init"][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
_lowerCamelCase : Union[str, Any] = default_version.base_version
elif patch:
_lowerCamelCase : List[Any] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_lowerCamelCase : Union[str, Any] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_lowerCamelCase : Tuple = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_lowerCamelCase ) == 0:
_lowerCamelCase : Any = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def lowerCamelCase_( ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : int = get_version()
_lowerCamelCase : Optional[int] = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_lowerCamelCase : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
_lowerCamelCase : List[Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_lowerCamelCase ) == 0:
_lowerCamelCase : Any = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_lowerCamelCase )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
_lowerCAmelCase : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 46 |
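# Quick illustration of the version arithmetic used by the release/patch
# helpers above, via packaging directly (the version string is an example):
import packaging.version

v = packaging.version.parse("4.31.0.dev0")
assert v.is_devrelease and v.base_version == "4.31.0"
patch_next = f"{v.major}.{v.minor}.{v.micro + 1}"  # "4.31.1"
minor_next = f"{v.major}.{v.minor + 1}.0"          # "4.32.0"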
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
while True:
_lowerCamelCase : Tuple = ["$"] * len(_lowerCamelCase )
_lowerCamelCase : str = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
_lowerCamelCase : Dict = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCamelCase : Any = "*"
_lowerCamelCase : Optional[int] = "*"
temp.append("X" )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
_lowerCamelCase : List[Any] = list(set(_lowerCamelCase ) )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = []
for minterm in minterms:
_lowerCamelCase : List[Any] = ""
for _ in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    '''simple docstring'''
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Any = j
if count == 1:
_lowerCamelCase : Union[str, Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : Any = count_n
_lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Any = 0
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : str = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = 1
return chart
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : str = [
        float(x )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Tuple = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
_lowerCamelCase : Any = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
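# The merge step above combines two minterms that differ in exactly one bit,
# replacing that bit with "_". A mini-check against compare_string (restored
# above under the name its call site in the merging loop uses):
assert compare_string("0110", "0111") == "011_"
assert compare_string("0110", "1001") is False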
"""simple docstring"""
import math
import sys
def lowerCamelCase_( number ) -> int:
    '''simple docstring'''
    if number != int(number ):
        raise ValueError("the value of input must be a natural number" )
    if number < 0:
        raise ValueError("the value of input must not be a negative number" )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
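# Quick check of the DP above (the function keeps the file's placeholder name
# lowerCamelCase_, so it is called by that name here): 13 = 9 + 4 needs two
# squares, while 12 = 4 + 4 + 4 needs three.
assert lowerCamelCase_(13) == 2
assert lowerCamelCase_(12) == 3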
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        '''simple docstring'''
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        '''simple docstring'''
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    '''simple docstring'''
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        '''simple docstring'''
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
            return 0 if d0 > d1 else 1
        return 0

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        '''simple docstring'''
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    '''simple docstring'''
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 46 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=5 ) -> Tuple:
    '''Fill the single <mask> token in masked_input and return the topk (filled text, probability, predicted token) candidates.'''
assert masked_input.count("<mask>" ) == 1
_lowerCamelCase : str = torch.tensor(tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) ).unsqueeze(0 ) # Batch size 1
_lowerCamelCase : Tuple = model(_lowerCamelCase )[0] # The last hidden-state is the first element of the output tuple
_lowerCamelCase : str = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_lowerCamelCase : Union[str, Any] = logits[0, masked_index, :]
_lowerCamelCase : int = logits.softmax(dim=0 )
_lowerCamelCase, _lowerCamelCase : int = prob.topk(k=_lowerCamelCase , dim=0 )
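    # map the top-k predicted ids back to sentencepiece pieces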
_lowerCamelCase : Any = " ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_lowerCamelCase ) )] )
_lowerCamelCase : Optional[Any] = tokenizer.mask_token
_lowerCamelCase : Tuple = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
_lowerCamelCase : int = predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(_lowerCamelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(_lowerCamelCase ) , _lowerCamelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_lowerCamelCase , _lowerCamelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_lowerCAmelCase : Tuple = CamembertTokenizer.from_pretrained('''camembert-base''')
_lowerCAmelCase : Optional[Any] = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
_lowerCAmelCase : Any = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 46 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
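# PyTorch schedulers fall back to dummy placeholder objects when torch is missing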
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
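# Flax schedulers are only importable when the JAX/Flax backend is installed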
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 46 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 1000 ) -> int:
    '''Project Euler 57: count how many of the first n sqrt(2) continued-fraction expansions have a numerator with more digits than the denominator.'''
_lowerCamelCase, _lowerCamelCase : Dict = 1, 1
_lowerCamelCase : Optional[int] = []
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = prev_numerator + 2 * prev_denominator
_lowerCamelCase : Union[str, Any] = prev_numerator + prev_denominator
if len(str(_lowerCamelCase ) ) > len(str(_lowerCamelCase ) ):
result.append(_lowerCamelCase )
_lowerCamelCase : List[Any] = numerator
_lowerCamelCase : Any = denominator
return len(_lowerCamelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 46 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
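        # denoise the deterministic sample through every scheduler timestep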
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
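        # two shifted copies of the deterministic sample give the batch distinct inputs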
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
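        # flatten the (copies, batch) axes and run the batched no-noise step in one call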
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 46 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : int = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_lowerCAmelCase : Tuple = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
_lowerCAmelCase : Optional[int] = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
_lowerCAmelCase : Union[str, Any] = '''▁'''
class A_ ( _a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self: Tuple ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[str, Any]="<s>" ,__lowerCAmelCase: str="</s>" ,__lowerCAmelCase: Any="</s>" ,__lowerCAmelCase: Tuple="<s>" ,__lowerCAmelCase: Dict="<unk>" ,__lowerCAmelCase: Optional[Any]="<pad>" ,__lowerCAmelCase: Any="<mask>" ,__lowerCAmelCase: Optional[Dict[str, Any]] = None ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else mask_token
_lowerCamelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowerCAmelCase ,)
_lowerCamelCase : Optional[Any] = vocab_file
_lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
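        # remap sentencepiece's reserved special pieces to the fairseq id convention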
_lowerCamelCase : List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_lowerCamelCase : int = len(self.sp_model ) - 1
_lowerCamelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _lowercase ( self: Tuple ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Optional[int] = [self.cls_token_id]
_lowerCamelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ,__lowerCAmelCase: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase ,token_ids_a=__lowerCAmelCase ,already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def _lowercase ( self: List[Any] ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ):
'''simple docstring'''
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
return self.sp_model.encode(__lowerCAmelCase ,out_type=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : Tuple = self.sp_model.PieceToId(__lowerCAmelCase )
return spm_id if spm_id else self.unk_token_id
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : List[str] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = []
else:
current_sub_tokens.append(__lowerCAmelCase )
_lowerCamelCase : Any = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __getstate__( self: str ):
'''simple docstring'''
_lowerCamelCase : Dict = self.__dict__.copy()
_lowerCamelCase : Any = None
return state
def __setstate__( self: Optional[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCamelCase : List[str] = os.path.join(
__lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase ,"wb" ) as fi:
_lowerCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
| 46 | 1 |
"""simple docstring"""
import sys
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
    '''Compute the matrix-chain-order DP tables: minimum multiplication cost and the optimal split point for every subchain.'''
_lowerCamelCase : str = len(_lowerCamelCase )
_lowerCamelCase : str = [[0 for x in range(_lowerCamelCase )] for x in range(_lowerCamelCase )]
_lowerCamelCase : int = [[0 for x in range(_lowerCamelCase )] for x in range(_lowerCamelCase )]
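    # matrix[a][b] = minimum multiplication cost for chain a..b; sol[a][b] = best split point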
for chain_length in range(2 , _lowerCamelCase ):
for a in range(1 , n - chain_length + 1 ):
_lowerCamelCase : Tuple = a + chain_length - 1
_lowerCamelCase : Dict = sys.maxsize
for c in range(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Tuple = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_lowerCamelCase : int = cost
_lowerCamelCase : List[Any] = c
return matrix, sol
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
    '''Recursively print the optimal parenthesization stored in optimal_solution.'''
if i == j:
print("A" + str(_lowerCamelCase ) , end=" " )
else:
print("(" , end=" " )
        print_optimal_solution(_lowerCamelCase , _lowerCamelCase , optimal_solution[i][j] )
        print_optimal_solution(_lowerCamelCase , optimal_solution[i][j] + 1 , _lowerCamelCase )
print(")" , end=" " )
def lowerCamelCase_( ) -> List[Any]:
    '''Run the classic 30x35x15x5x10x20x25 matrix-chain example.'''
_lowerCamelCase : Tuple = [30, 35, 15, 5, 10, 20, 25]
_lowerCamelCase : int = len(_lowerCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_lowerCamelCase, _lowerCamelCase : List[Any] = matrix_chain_order(_lowerCamelCase )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(_lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = num_frames
_lowerCamelCase : Optional[int] = tubelet_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = qkv_bias
super().__init__(**__lowerCAmelCase )
| 46 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCAmelCase : Tuple = '''\
Text data.
Second line of data.'''
_lowerCAmelCase : str = '''file'''
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : str = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_lowerCamelCase : List[str] = bytes(_lowerCamelCase , "utf-8" )
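    # write the fixture text into a zstd-compressed file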
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , "w" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Tuple = str(Path(_lowerCamelCase ).resolve() )
assert cached_path(_lowerCamelCase ) == text_file
# relative path
_lowerCamelCase : Optional[int] = str(Path(_lowerCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowerCamelCase ) == text_file
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : str = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
# relative path
_lowerCamelCase : List[Any] = "./__missing_file__.txt"
with pytest.raises(_lowerCamelCase ):
cached_path(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
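                # a newly added special token must encode to one id and vanish when decoding with skip_special_tokens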
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase : List[Any] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
| 46 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = "ylacombe/bark-small"
_lowerCamelCase : Tuple = tempfile.mkdtemp()
_lowerCamelCase : Dict = "en_speaker_1"
_lowerCamelCase : List[str] = "This is a test string"
_lowerCamelCase : Optional[int] = "speaker_embeddings_path.json"
_lowerCamelCase : Dict = "speaker_embeddings"
def _lowercase ( self: str ,**__lowerCAmelCase: int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint ,**__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : List[str] = BarkProcessor(tokenizer=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
_lowerCamelCase : Any = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
_lowerCamelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="(BOS)" ,eos_token="(EOS)" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
_lowerCamelCase : Union[str, Any] = 35
_lowerCamelCase : Optional[Any] = 2
_lowerCamelCase : int = 8
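        # dummy voice preset with the semantic / coarse / fine prompt shapes Bark expects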
_lowerCamelCase : Union[str, Any] = {
"semantic_prompt": np.ones(__lowerCAmelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCamelCase : Union[str, Any] = processor(text=self.input_string ,voice_preset=__lowerCAmelCase )
_lowerCamelCase : List[Any] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(__lowerCAmelCase ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname ,"file.npz" )
np.savez(__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : str = processor(text=self.input_string ,voice_preset=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(__lowerCAmelCase ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCamelCase : Optional[Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = BarkProcessor(tokenizer=__lowerCAmelCase )
_lowerCamelCase : Tuple = processor(text=self.input_string )
_lowerCamelCase : Union[str, Any] = tokenizer(
self.input_string ,padding="max_length" ,max_length=256 ,add_special_tokens=__lowerCAmelCase ,return_attention_mask=__lowerCAmelCase ,return_token_type_ids=__lowerCAmelCase ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) -> Tuple:
    '''Compute the latent height/width for the given pixel size, rounding up when it is not an exact multiple of scale_factor**2.'''
_lowerCamelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=512 , _lowerCamelCase=512 ) -> int:
    '''Resize a PIL image, rescale it to [-1, 1], and return a (1, 3, h, w) float tensor.'''
_lowerCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("RGB" ) )
    _lowerCamelCase : Any = arr.astype(np.float32 ) / 127.5 - 1
_lowerCamelCase : Optional[Any] = np.transpose(_lowerCamelCase , [2, 0, 1] )
_lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
        '''Skip the first part of the timestep schedule according to strength, img2img style.'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
        '''Encode the input image with the movq VAE (4-channel inputs are reused as latents) and noise the result to the starting timestep.'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
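        # guidance_scale > 1 switches on classifier-free guidance with a doubled batch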
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
        if not all(isinstance(i ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, only PIL images and PyTorch tensors are supported""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
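# Editor's minimal sketch of the classifier-free guidance step inside the
# denoising loop above. The tensor shape and guidance scale are illustrative
# assumptions, not values taken from this pipeline.
def _cfg_sketch():
    pred = torch.randn(2 ,4 ,64 ,64 )  # unconditional + conditional batches stacked on dim 0
    guidance_scale = 4.0
    noise_pred_uncond, noise_pred_text = pred.chunk(2 )
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert guided.shape == (1, 4, 64, 64)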
| 46 | 1 |
"""simple docstring"""
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
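    # Editor's sanity check for the bounds used above: a base of 10 or more
    # always produces too many digits, and 9**22 already falls one digit short,
    # so ranges of 1..9 and 1..21 cover every n-digit nth power.
    assert len(str(10**5 ) ) == 6
    assert len(str(9**21 ) ) == 21
    assert len(str(9**22 ) ) == 21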
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    '''simple docstring'''
    print("Making key files..." )
    make_key_files("rsa" , 1024 )
    print("Key files generation successful." )
def generate_key(key_size: int ) -> tuple[tuple[int, int], tuple[int, int]]:
    '''simple docstring'''
    print("Generating prime p..." )
    p = rabinMiller.generate_large_prime(key_size )
    print("Generating prime q..." )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print("Calculating d that is mod inverse of e..." )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str , key_size: int ) -> None:
    '''simple docstring'''
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print("\nWARNING:" )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    public_key, private_key = generate_key(key_size )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
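    # Editor's illustrative round trip with tiny (insecure) primes, showing how
    # the (n, e) / (n, d) pairs produced above are used; it bypasses the
    # rabinMiller/cryptoMath helpers so this check is self-contained.
    demo_p, demo_q = 61, 53
    demo_n = demo_p * demo_q                # 3233
    demo_phi = (demo_p - 1) * (demo_q - 1)  # 3120
    demo_e = 17                             # coprime with demo_phi
    demo_d = pow(demo_e , -1 , demo_phi )   # modular inverse (Python 3.8+)
    assert pow(pow(42 , demo_e , demo_n ) , demo_d , demo_n ) == 42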
| 46 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A_ :
def __init__( self: str ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[str]=13 ,__lowerCAmelCase: Optional[Any]=7 ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Any=99 ,__lowerCAmelCase: List[str]=32 ,__lowerCAmelCase: Tuple=5 ,__lowerCAmelCase: List[str]=4 ,__lowerCAmelCase: Dict=37 ,__lowerCAmelCase: Tuple="gelu" ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: List[Any]=16 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Tuple=3 ,__lowerCAmelCase: Tuple=4 ,__lowerCAmelCase: str=None ,):
'''simple docstring'''
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : Any = is_training
_lowerCamelCase : Union[str, Any] = use_input_mask
_lowerCamelCase : int = use_token_type_ids
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : Any = type_vocab_size
_lowerCamelCase : Any = type_sequence_label_size
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : Union[str, Any] = num_choices
_lowerCamelCase : str = scope
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCamelCase : str = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : str = None
if self.use_token_type_ids:
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCamelCase : Any = None
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCamelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self: List[str] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,)
def _lowercase ( self: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = LlamaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase )
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: int ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int ,):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : List[Any] = LlamaModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,encoder_attention_mask=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,)
_lowerCamelCase : Dict = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[str] ,):
'''simple docstring'''
_lowerCamelCase : Tuple = LlamaForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Dict = True
_lowerCamelCase : int = LlamaForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
_lowerCamelCase : Any = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,encoder_attention_mask=__lowerCAmelCase ,use_cache=__lowerCAmelCase ,)
_lowerCamelCase : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_lowerCamelCase : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
_lowerCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowerCamelCase : Optional[Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
_lowerCamelCase : Tuple = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,encoder_attention_mask=__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase ,)["hidden_states"][0]
_lowerCamelCase : List[str] = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,encoder_attention_mask=__lowerCAmelCase ,past_key_values=__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase ,)["hidden_states"][0]
# select random slice
_lowerCamelCase : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowerCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-3 ) )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase__ = (LlamaForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = LlamaModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : Any = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = 3
_lowerCamelCase : Optional[Any] = input_dict["input_ids"]
_lowerCamelCase : List[str] = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowerCamelCase : int = LlamaForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[str] = 3
_lowerCamelCase : Optional[Any] = "single_label_classification"
_lowerCamelCase : Union[str, Any] = input_dict["input_ids"]
_lowerCamelCase : List[Any] = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowerCamelCase : Tuple = LlamaForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = 3
_lowerCamelCase : List[str] = "multi_label_classification"
_lowerCamelCase : Union[str, Any] = input_dict["input_ids"]
_lowerCamelCase : int = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase : Any = LlamaForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def _lowercase ( self: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str = ids_tensor([1, 10] ,config.vocab_size )
_lowerCamelCase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase : int = LlamaModel(__lowerCAmelCase )
original_model.to(__lowerCAmelCase )
original_model.eval()
_lowerCamelCase : List[str] = original_model(__lowerCAmelCase ).last_hidden_state
_lowerCamelCase : Any = original_model(__lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase : List[Any] = {"type": scaling_type, "factor": 10.0}
_lowerCamelCase : str = LlamaModel(__lowerCAmelCase )
scaled_model.to(__lowerCAmelCase )
scaled_model.eval()
_lowerCamelCase : Any = scaled_model(__lowerCAmelCase ).last_hidden_state
_lowerCamelCase : List[str] = scaled_model(__lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCAmelCase ,__lowerCAmelCase ,atol=1e-5 ) )
@require_torch
class A_ ( unittest.TestCase ):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Tuple = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
_lowerCamelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" ,device_map="auto" )
_lowerCamelCase : List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowerCamelCase : Any = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowerCAmelCase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Dict = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,__lowerCAmelCase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[str] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
_lowerCamelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" ,device_map="auto" )
_lowerCamelCase : Dict = model(torch.tensor(__lowerCAmelCase ) )
# Expected mean on dim = -1
_lowerCamelCase : List[str] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowerCAmelCase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Union[str, Any] = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,__lowerCAmelCase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
_lowerCamelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" ,device_map="auto" )
_lowerCamelCase : List[Any] = model(torch.tensor(__lowerCAmelCase ) )
# Expected mean on dim = -1
_lowerCamelCase : Union[str, Any] = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowerCAmelCase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase : Any = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,__lowerCAmelCase ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
_lowerCamelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" ,device_map="auto" )
_lowerCamelCase : Dict = model(torch.tensor(__lowerCAmelCase ) )
_lowerCamelCase : Any = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,__lowerCAmelCase ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
_lowerCamelCase : str = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,__lowerCAmelCase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("Model is curently gated" )
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
_lowerCamelCase : Tuple = "Simply put, the theory of relativity states that "
_lowerCamelCase : Tuple = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
_lowerCamelCase : List[str] = tokenizer.encode(__lowerCAmelCase ,return_tensors="pt" )
_lowerCamelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" ,device_map="sequential" ,use_safetensors=__lowerCAmelCase )
# greedy generation outputs
_lowerCamelCase : Optional[Any] = model.generate(__lowerCAmelCase ,max_new_tokens=64 ,top_p=__lowerCAmelCase ,temperature=1 ,do_sample=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
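# Editor's sketch of what the "linear" RoPE scaling exercised above amounts to:
# positions are divided by the scaling factor before the rotary angles are
# computed. The dimension and base value here are illustrative assumptions,
# not the model's actual configuration.
def rope_angles(positions ,dim: int = 8 ,base: float = 10_000.0 ,linear_factor: float = 1.0 ):
    inv_freq = 1.0 / (base ** (torch.arange(0 ,dim ,2 ).float() / dim))
    scaled = positions.float() / linear_factor  # factor > 1 stretches the usable context
    return torch.outer(scaled ,inv_freq )
# e.g. rope_angles(torch.arange(16), linear_factor=10.0).shape == (16, 4)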
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Any = outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
_lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
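# Editor's standalone restatement of the sequence-length rule the tester above
# relies on: a ViTMAE encoder sees (num_patches + 1) tokens and keeps only the
# unmasked fraction, rounded up.
def vit_mae_visible_tokens(image_size: int ,patch_size: int ,mask_ratio: float ) -> int:
    num_patches = (image_size // patch_size) ** 2
    return int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
assert vit_mae_visible_tokens(30 ,2 ,0.6 ) == 91  # the tester defaults above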
| 46 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: List[str]=13 ,__lowerCAmelCase: int=2 ,__lowerCAmelCase: Dict=24 ,__lowerCAmelCase: int=16 ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: Optional[int]=32 ,__lowerCAmelCase: Optional[int]=5 ,__lowerCAmelCase: Optional[Any]=4 ,__lowerCAmelCase: Optional[Any]=37 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[Any]=10 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: Dict=2 ,__lowerCAmelCase: Dict=2 ,):
'''simple docstring'''
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : Any = max_length
_lowerCamelCase : Any = num_mel_bins
_lowerCamelCase : int = is_training
_lowerCamelCase : Union[str, Any] = use_labels
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[str] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : int = scope
_lowerCamelCase : Any = frequency_stride
_lowerCamelCase : Optional[int] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCamelCase : str = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCamelCase : List[Any] = frequency_out_dimension * time_out_dimension
_lowerCamelCase : Optional[Any] = num_patches + 2
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : List[Any] = self.get_config()
return config, input_values, labels
def _lowercase ( self: str ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,)
def _lowercase ( self: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ASTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = ASTModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : List[str] = [*signature.parameters.keys()]
_lowerCamelCase : Tuple = ["input_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Any = ASTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath )
return audio, sampling_rate
@require_torch
@require_torchaudio
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Dict ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.default_feature_extractor
_lowerCamelCase : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = self.default_feature_extractor
_lowerCamelCase, _lowerCamelCase : Any = prepare_audio()
_lowerCamelCase : int = audio.squeeze().numpy()
_lowerCamelCase : List[str] = feature_extractor(__lowerCAmelCase ,sampling_rate=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCamelCase : str = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : str = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
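# Editor's standalone restatement of the sequence-length arithmetic from
# ASTModelTester above: a strided 2D patchification over the
# (num_mel_bins, max_length) spectrogram plus the [CLS] and distillation tokens.
def ast_seq_length(num_mel_bins: int = 16 ,max_length: int = 24 ,patch_size: int = 2 ,frequency_stride: int = 2 ,time_stride: int = 2 ) -> int:
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2
assert ast_seq_length() == 98  # 8 * 12 patches + 2 special tokens with the tester defaults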
| 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int , right: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int] , target: int ) -> int:
    '''simple docstring'''
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int , right: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
else:
print('''Not found''')
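# Editor's small non-interactive usage example for the functions above: both
# search variants agree on a sorted list (the asserts run on import as a
# lightweight self-check).
_example = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23]
assert ite_ternary_search(_example, 13) == _example.index(13)
assert rec_ternary_search(0, len(_example) - 1, _example, 13) == _example.index(13)
assert ite_ternary_search(_example, 4) == -1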
| 46 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class A_ ( _a ):
lowerCAmelCase__ = 'open-llama'
def __init__( self: List[Any] ,__lowerCAmelCase: int=100_000 ,__lowerCAmelCase: Optional[int]=4_096 ,__lowerCAmelCase: Optional[int]=11_008 ,__lowerCAmelCase: Tuple=32 ,__lowerCAmelCase: Union[str, Any]=32 ,__lowerCAmelCase: int="silu" ,__lowerCAmelCase: Dict=2_048 ,__lowerCAmelCase: Dict=0.02 ,__lowerCAmelCase: Any=1e-6 ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[Any]=0 ,__lowerCAmelCase: List[Any]=1 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: int=False ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Any=0.1 ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Dict=None ,**__lowerCAmelCase: Union[str, Any] ,):
'''simple docstring'''
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : List[Any] = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Any = rms_norm_eps
_lowerCamelCase : Optional[Any] = use_cache
_lowerCamelCase : str = kwargs.pop(
"use_memorry_efficient_attention" ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_dropout_prob
_lowerCamelCase : Optional[int] = use_stable_embedding
_lowerCamelCase : List[Any] = shared_input_output_embedding
_lowerCamelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,tie_word_embeddings=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,__lowerCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
_lowerCamelCase : Optional[Any] = self.rope_scaling.get("type" ,__lowerCAmelCase )
_lowerCamelCase : int = self.rope_scaling.get("factor" ,__lowerCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 46 |
"""simple docstring"""
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
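    # Editor's check against the worked example from the problem statement: for
    # 2 <= a <= 5 and 2 <= b <= 5 there are 15 distinct powers, because 2**4
    # and 4**2 both equal 16.
    assert solution(5) == 15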
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_lowerCAmelCase : Tuple = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_lowerCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
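# Editor's minimal sketch of the lazy-import pattern used above: attributes are
# resolved to objects in other modules on first access instead of at import
# time. This mirrors the intent of transformers' _LazyModule without depending
# on its internals; the demo module names are arbitrary.
import importlib
import types
class LazyNamespace(types.ModuleType):
    def __init__(self ,name: str ,attr_to_module: dict ):
        super().__init__(name )
        self._attr_to_module = attr_to_module
    def __getattr__(self ,attr: str ):
        module = importlib.import_module(self._attr_to_module[attr] )
        value = getattr(module ,attr )
        setattr(self ,attr ,value )  # cache so __getattr__ fires only once per attribute
        return value
if __name__ == "__main__":
    ns = LazyNamespace("demo" ,{"sqrt": "math", "dumps": "json"} )
    print(ns.sqrt(9.0 ) ,ns.dumps({"ok": True} ) )  # math and json import lazily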
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
lowerCAmelCase__ = 'esm'
def __init__( self: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: List[Any]=1_026 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Dict="absolute" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,mask_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : str = use_cache
_lowerCamelCase : Union[str, Any] = emb_layer_norm_before
_lowerCamelCase : Tuple = token_dropout
_lowerCamelCase : Dict = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = EsmFoldConfig(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_lowerCamelCase : List[str] = get_default_vocab_list()
else:
_lowerCamelCase : Optional[Any] = vocab_list
else:
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,__lowerCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = super().to_dict()
if isinstance(self.esmfold_config ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.trunk is None:
_lowerCamelCase : Optional[int] = TrunkConfig()
elif isinstance(self.trunk ,__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = TrunkConfig(**self.trunk )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 4_8
lowerCAmelCase__ = 1_0_2_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Any ):
'''simple docstring'''
if self.structure_module is None:
_lowerCamelCase : Tuple = StructureModuleConfig()
elif isinstance(self.structure_module ,__lowerCAmelCase ):
_lowerCamelCase : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_lowerCamelCase : Optional[Any] = self.sequence_state_dim // self.sequence_head_width
_lowerCamelCase : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should be less than 0.4, got {self.dropout}.""" )
    def to_dict( self ):
        '''simple docstring'''
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig :
    sequence_dim: int = 3_8_4
    pairwise_dim: int = 1_2_8
    ipa_dim: int = 1_6
    resnet_dim: int = 1_2_8
    num_heads_ipa: int = 1_2
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 1_0
    epsilon: float = 1E-8
    inf: float = 1E5
    def to_dict( self ):
        '''simple docstring'''
        return asdict(self )
def get_default_vocab_list( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
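For reference, the head-count checks in TrunkConfig above are plain integer arithmetic: each state dimension must factor exactly into attention heads of the configured width. A minimal standalone sketch using the default values shown above (the variable names here are illustrative, not part of the file):
sequence_state_dim, sequence_head_width = 1_024, 32
pairwise_state_dim, pairwise_head_width = 128, 32
sequence_num_heads = sequence_state_dim // sequence_head_width  # 32 heads
pairwise_num_heads = pairwise_state_dim // pairwise_head_width  # 4 heads
assert sequence_state_dim == sequence_num_heads * sequence_head_width
assert pairwise_state_dim == pairwise_num_heads * pairwise_head_width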
| 46 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ ( unittest.TestCase ):
    @slow
    def test_xlm_roberta_base( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape ,expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,expected_output_values_last_dim ,atol=1e-3 ) )
    @slow
    def test_xlm_roberta_large( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["last_hidden_state"].detach()
        self.assertEqual(output.shape ,expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,expected_output_values_last_dim ,atol=1e-3 ) )
| 46 |
"""simple docstring"""
import re
def lowerCamelCase_( dna: str ) -> str:
    '''simple docstring'''
    if len(re.findall("[ATCG]" , dna ) ) != len(dna ):
        raise ValueError("Invalid Strand" )
    return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
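A quick sanity check of the complement logic above; this mirrors exactly what the translation table does and can be run standalone (the literal strand is just an example):
assert "ATGC".translate(str.maketrans("ATCG" , "TAGC" ) ) == "TACG"  # A<->T, C<->G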
| 46 | 1 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase : Optional[int] = logging.getLogger()
def lowerCamelCase_( ) -> int:
'''simple docstring'''
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
_lowerCamelCase : Dict = parser.parse_args()
return args.f
class DeeBertTests( TestCasePlus ):
    def setUp( self ):
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self ,args ):
        '''simple docstring'''
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 ,"run_glue_deebert.py" )
            with patch.object(sys ,"argv" ,args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value ,0.6_66 )
@slow
@require_torch_non_multi_gpu
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        testargs = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(testargs )
        testargs = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(testargs )
        testargs = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(testargs )
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
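The slicing above splits timm's fused QKV projection, a (3 * hidden_size, hidden_size) matrix stacked as query, key, value, into three equal blocks. A small self-contained sketch of the same arithmetic (dummy size, illustrative names):
import torch
hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size , hidden_size )
query = in_proj_weight[: hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size :, :]
assert torch.equal(torch.cat([query, key, value] ) , in_proj_weight )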
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( model_name , pytorch_dump_folder_path , base_model=True ):
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main" , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
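The conversion can also be driven programmatically instead of via argparse; an illustrative call (the output folder is a placeholder, and invoking it will download the DINO weights from torch hub):
# convert_vit_checkpoint(model_name="dino_vitb16" , pytorch_dump_folder_path="./dino_vitb16_hf" , base_model=True )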
| 46 | 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
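The pad_to_multiple_of branch above exists because fp16/bf16 tensor-core kernels prefer sequence lengths divisible by 8 (16 for fp8). A minimal sketch of the rounding it induces (illustrative helper, not part of the source):
def padded_length(longest , multiple ):
    # round the longest sequence in the batch up to the next multiple
    return ((longest + multiple - 1) // multiple) * multiple
assert padded_length(113 , 8 ) == 120
assert padded_length(113 , 16 ) == 128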
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    '''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}:""" , eval_metric )
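Conceptually, accelerator.accumulate(model) above steps the optimizer only once per gradient_accumulation_steps batches and skips gradient synchronization in between. A rough single-process equivalent, shown only to illustrate the idea (this is a simplification, not Accelerate's actual implementation):
def manual_accumulation_sketch(model , optimizer , lr_scheduler , train_dataloader , gradient_accumulation_steps ):
    # scale the loss so the accumulated gradient matches one large-batch update
    for step, batch in enumerate(train_dataloader ):
        loss = model(**batch ).loss / gradient_accumulation_steps
        loss.backward()
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()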
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps" , type=int , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs ):
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
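Subtracting the row-wise maximum before exponentiating keeps every exponent non-positive, so the softmax above cannot overflow even for very large logits. A quick numeric check (illustrative values):
# logits large enough that a naive np.exp would overflow:
# probs = softmax(np.array([[1000.0, 1001.0, 1002.0]] ))
# assert np.isclose(probs.sum() , 1.0 )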
class ClassificationFunction( ExplicitEnum ):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class TextClassificationPipeline( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self: str ,**__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self ,return_all_scores=None ,function_to_apply=None ,top_k="" ,**tokenizer_kwargs ):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k ,int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,UserWarning ,)
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply ,str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self ,*args ,**kwargs ):
        '''simple docstring'''
        result = super().__call__(*args ,**kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0] ,str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self ,inputs ,**tokenizer_kwargs ):
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs ,dict ):
            return self.tokenizer(**inputs ,return_tensors=return_tensors ,**tokenizer_kwargs )
        elif isinstance(inputs ,list ) and len(inputs ) == 1 and isinstance(inputs[0] ,list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=return_tensors ,**tokenizer_kwargs )
        elif isinstance(inputs ,list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs ,return_tensors=return_tensors ,**tokenizer_kwargs )
    def _forward( self ,model_inputs ):
        '''simple docstring'''
        return self.model(**model_inputs )
    def postprocess( self ,model_outputs ,function_to_apply=None ,top_k=1 ,_legacy=True ):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] ,reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
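A hedged usage sketch of the pipeline class above via the high-level factory (the checkpoint name and the exact scores are illustrative, and the call downloads a model, hence commented out):
# from transformers import pipeline
# classifier = pipeline("text-classification" , model="distilbert-base-uncased-finetuned-sst-2-english" )
# classifier("I love this movie" , top_k=None )
# e.g. [{"label": "POSITIVE", "score": 0.99...}, {"label": "NEGATIVE", "score": 0.00...}]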
| 46 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_attention_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_choices=4 ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" ,from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCAmelCase : Tuple = '''\
Text data.
Second line of data.'''
_lowerCAmelCase : str = '''file'''
@pytest.fixture(scope="session" )
def zstd_path( tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    '''simple docstring'''
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    '''simple docstring'''
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path ):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    '''simple docstring'''
    output_path = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline( ):
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : Any = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
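An illustrative programmatic call of the converter above; the paths are placeholders:
# convert("pytorch_model.bin" , save_path="pytorch_model.fp16.bin" )  # halves every tensor and writes a new file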
| 46 | 1 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : Any = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        config = AutoConfig.from_pretrained("bert-base-uncased" )
        self.assertIsInstance(config ,BertConfig )
def _lowercase ( self: Dict ):
'''simple docstring'''
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG )
        self.assertIsInstance(config ,RobertaConfig )
def _lowercase ( self: Any ):
'''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(config ,RobertaConfig )
def _lowercase ( self: List[str] ):
'''simple docstring'''
        config = AutoConfig.for_model("roberta" )
        self.assertIsInstance(config ,RobertaConfig )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir ,"fake-roberta" )
            os.makedirs(folder ,exist_ok=True )
            with open(os.path.join(folder ,"config.json" ) ,"w" ) as f:
                f.write(json.dumps({} ) )
            config = AutoConfig.from_pretrained(folder )
            self.assertEqual(type(config ) ,RobertaConfig )
def _lowercase ( self: Dict ):
'''simple docstring'''
        try:
            AutoConfig.register("custom" ,CustomConfig )
            # Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("model" ,CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("bert" ,BertConfig )
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config ,CustomConfig )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,"bert-base is not a local folder and is not a valid model identifier" ):
            config = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=False )
        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=True )
        self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir ,trust_remote_code=True )
            self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
        class NewModelConfigLocal( BertConfig ):
            model_type = 'new-model'
        try:
            AutoConfig.register("new-model" ,NewModelConfigLocal )
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 1 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowerCAmelCase : Any = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
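For reference, require_version takes a pip-style requirement string plus an optional hint appended to the error message; a sketch of typical calls (the requirement strings below are illustrative, not taken from the table above):
# require_version("tokenizers>=0.11.1" , "To fix: pip install -U tokenizers" )
# require_version_core("tqdm>=4.27" )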
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 | 1 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_lowerCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class A_ ( nn.Module ):
    def __init__( self ,args ):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self ,x ):
        '''simple docstring'''
        out = self.pool(self.model(x ) )
        out = torch.flatten(out ,start_dim=2 )
        out = out.transpose(1 ,2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset( Dataset ):
    def __init__( self ,data_path ,tokenizer ,transforms ,labels ,max_seq_length ):
        '''simple docstring'''
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self: List[Any] ):
'''simple docstring'''
return len(self.data )
    def __getitem__( self: Tuple ,index: int ):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] ,add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir ,self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
        label_freqs = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def collate_fn( batch ) -> tuple:
    '''simple docstring'''
    lens = [len(row["sentence"] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels( ) -> list:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms( ) -> transforms.Compose:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
| 46 |
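A quick shape sketch (toy tensors, not from the original file) of how POOLING_BREAKDOWN turns num_image_embeds into an adaptive-pool grid: pooling 2048-channel ResNet features to an (H, W) grid and flattening yields H*W image embeddings of width 2048.

import torch
from torch import nn

pool = nn.AdaptiveAvgPool2d((3, 1))  # POOLING_BREAKDOWN[3]
feats = torch.randn(2, 2048, 7, 7)   # batch of conv feature maps
out = torch.flatten(pool(feats), start_dim=2).transpose(1, 2)
print(out.shape)                     # torch.Size([2, 3, 2048]): 3 embeddings per image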
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> list:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ) -> None:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ) -> None:
'''simple docstring'''
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    vqa_model = False
    nlvr_model = False
    mlm_model = False
    irtr_model = False
if "vqa" in checkpoint_url:
_lowerCamelCase : str = True
_lowerCamelCase : Union[str, Any] = 3129
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = "vqa2-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Any = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[int] = idalabel
_lowerCamelCase : int = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Any = ViltForQuestionAnswering(_lowerCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[Any] = {0: "False", 1: "True"}
_lowerCamelCase : int = {v: k for k, v in config.idalabel.items()}
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Optional[Any] = ViltForImagesAndTextClassification(_lowerCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : Union[str, Any] = ViltForImageAndTextRetrieval(_lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[int] = ViltForMaskedLM(_lowerCamelCase )
else:
raise ValueError("Unknown model type" )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["state_dict"]
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k , None )
# load state dict into HuggingFace model
model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
# Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    processor = ViltProcessor(image_processor , tokenizer )
# Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1 , text , return_tensors="pt" )
        encoding_2 = processor(image2 , text , return_tensors="pt" )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=True ).raw )
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image , text , return_tensors="pt" )
        outputs = model(**encoding )
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1e-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46 | 1 |
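A standalone check (toy sizes, not part of the conversion script) that the fused-QKV slicing used in read_in_q_k_v splits a (3*hidden, hidden) matrix into equal query/key/value blocks, in that order.

import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q = qkv[:hidden, :]
k = qkv[hidden : hidden * 2, :]
v = qkv[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)
assert torch.equal(torch.cat([q, k, v], dim=0), qkv)  # the three slices tile the fused matrix exactly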
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
    model_type = 'esm'
    def __init__( self ,vocab_size=None ,mask_token_id=None ,pad_token_id=None ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_026 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,position_embedding_type="absolute" ,use_cache=True ,emb_layer_norm_before=None ,token_dropout=False ,is_folding_model=False ,esmfold_config=None ,vocab_list=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,mask_token_id=mask_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config ,dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict( self: Any ):
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config ,EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class A_ :
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self: Dict ):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk ,dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self: Optional[Any] ):
        '''simple docstring'''
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class A_ :
    num_blocks: int = 4_8
    sequence_state_dim: int = 1_0_2_4
    pairwise_state_dim: int = 1_2_8
    sequence_head_width: int = 3_2
    pairwise_head_width: int = 3_2
    position_bins: int = 3_2
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_2_8
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self: Any ):
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module ,dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict( self: List[str] ):
        '''simple docstring'''
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class A_ :
    sequence_dim: int = 3_8_4
    pairwise_dim: int = 1_2_8
    ipa_dim: int = 1_6
    resnet_dim: int = 1_2_8
    num_heads_ipa: int = 1_2
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 1_0
    epsilon: float = 1E-8
    inf: float = 1E5
    def to_dict( self: Any ):
        '''simple docstring'''
        return asdict(self )
def get_default_vocab_list( ) -> tuple:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 46 |
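A toy check (assumed values) of the invariant the TrunkConfig validation enforces: each state dimension must factor exactly into attention heads of the configured width.

sequence_state_dim, sequence_head_width = 1024, 32
pairwise_state_dim, pairwise_head_width = 128, 32
assert sequence_state_dim % sequence_head_width == 0
assert pairwise_state_dim % pairwise_head_width == 0
num_seq_heads = sequence_state_dim // sequence_head_width    # 32 heads
num_pair_heads = pairwise_state_dim // pairwise_head_width   # 4 heads
assert num_seq_heads * sequence_head_width == sequence_state_dim
assert num_pair_heads * pairwise_head_width == pairwise_state_dim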
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( string1 , string2 ) -> str | Literal[False]:
    '''simple docstring'''
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1 )
def check( binary ) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        check1 = ["$"] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X" )
        for i in range(len(binary ) ):
            if check1[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable , minterms ) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( string1 , string2 , count ) -> bool:
    '''simple docstring'''
    list1 = list(string1 )
    list2 = list(string2 )
    count_n = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection( chart , prime_implicants ) -> list[str]:
    '''simple docstring'''
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants , binary ) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("_" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input("Enter the no. of variables\n" ) )
    minterms = [
        float(x )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("Prime Implicants are:" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("Essential Prime Implicants are:" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
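A doctest-style illustration of the merge step above: two implicants that differ in exactly one bit combine, with the differing position replaced by "_", while anything differing in more positions is rejected (using compare_string as defined above).

assert compare_string("110", "100") == "1_0"
assert compare_string("110", "001") is False  # differs in more than one position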
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class A_ ( unittest.TestCase ):
@cached_property
    def resolver( self: Optional[Any] ):
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def _lowercase ( self: int ):
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def _lowercase ( self: Any ):
'''simple docstring'''
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en" ,dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 46 |
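A side sketch of the cached_property behavior the test relies on: the converter is built once per test instance and reused. This illustrates the stdlib equivalent, not the transformers helper.

from functools import cached_property

class Example:
    @cached_property
    def value(self):
        print("computed once")
        return 42

e = Example()
assert e.value == e.value  # "computed once" prints a single time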
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
    def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
        '''simple docstring'''
        self.value = __lowerCAmelCase
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
    def __str__( self: List[Any] ):
        '''simple docstring'''
        value = str(self.value ) + " "
        left = str(self.left or "" )
        right = str(self.right or "" )
        return value + left + right
def split( root: Node | None , value: int ) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge( left: Node | None , right: Node | None ) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root: Node | None , value: int ) -> Node | None:
    '''simple docstring'''
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root: Node | None , value: int ) -> Node | None:
    '''simple docstring'''
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def inorder( root: Node | None ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def interact_treap( root: Node | None , args: str ) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def main() -> None:
    '''simple docstring'''
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("good bye!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 | 1 |
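A short usage sketch of the treap API above (insert/erase/inorder as defined in this file); the values are arbitrary example data.

root = None
for v in (5, 3, 8, 1):
    root = insert(root, v)
inorder(root)           # prints 1,3,5,8,
root = erase(root, 3)
inorder(root)           # prints 1,5,8,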
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))
def compare_versions( library_or_version: Union[str, Version] , operation: str , requirement_version: str ) -> bool:
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation: str , version: str ) -> bool:
    '''simple docstring'''
    return compare_versions(torch_version , operation , version )
| 46 |
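A hedged usage sketch of the helpers above; STR_OPERATION_TO_FUNC is assumed to map operator strings to the matching functions from the stdlib operator module.

import operator
from packaging.version import parse

STR_OPERATION_TO_FUNC = {">": operator.gt, ">=": operator.ge, "==": operator.eq,
                         "!=": operator.ne, "<=": operator.le, "<": operator.lt}

assert STR_OPERATION_TO_FUNC[">="](parse("2.1.0"), parse("2.0.0"))
assert not STR_OPERATION_TO_FUNC["<"](parse("2.1.0"), parse("2.0.0"))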
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken("<mask>" ,lstrip=True ,rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self: List[str] ,__lowerCAmelCase: str ):
        '''simple docstring'''
        input_text = "this is a test"
        output_text = "this is a test"
return input_text, output_text
    def get_clean_sequence( self: List[str] ,tokenizer: List[Any] ,with_prefix_space: Any=False ,max_length: int=20 ,min_length: int=5 ):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text ,add_special_tokens=False )
        text = tokenizer.decode(ids ,clean_up_tokenization_spaces=False )
        return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
        self.assertEqual(len(vocab_keys ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size ,0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )
                self.assertNotEqual(vocab_size_2 ,0 )
                self.assertEqual(vocab_size ,vocab_size_2 )
                self.assertEqual(added_toks ,len(new_toks ) )
                self.assertEqual(all_size_2 ,all_size + len(new_toks ) )
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) ,4 )
                self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )
                self.assertNotEqual(vocab_size_3 ,0 )
                self.assertEqual(vocab_size ,vocab_size_3 )
                self.assertEqual(added_toks_2 ,len(new_toks_2 ) )
                self.assertEqual(all_size_3 ,all_size_2 + len(new_toks_2 ) )
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) ,6 )
                self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] ,tokens[1] )
                self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] ,tokens[-4] )
                self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
    def _lowercase ( self: str ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test" )
        # fmt: off
        self.assertListEqual(tokens ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=sequences ,)
| 46 | 1 |
"""simple docstring"""
def cocktail_shaker_sort( unsorted: list ) -> list:
    '''simple docstring'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 46 |
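A quick standalone check of the bidirectional-pass sort above on assumed example data:

data = [4, 5, 2, 1, 2]
assert cocktail_shaker_sort(data) == [1, 2, 2, 4, 5]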
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 46 | 1 |
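A minimal sketch of the guarded-import pattern this __init__ repeats for each optional backend; the names here are illustrative stand-ins, not the real diffusers helpers.

class OptionalDependencyNotAvailable(Exception):
    pass

def is_scipy_available() -> bool:
    try:
        import scipy  # noqa: F401
        return True
    except ImportError:
        return False

try:
    if not is_scipy_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    scheduler_cls = None  # fall back to a placeholder instead of failing at import time
else:
    scheduler_cls = object  # the real scheduler import would go here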
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve( matrix: Matrix , vector: Matrix ) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate( y_list: list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_list )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function( variable: int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution( func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    '''simple docstring'''
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 46 |
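A standalone sanity check of the elimination idea used by solve, on the 2x2 system x + y = 3, 2x - y = 0 (assumed example data):

m = [[1.0, 1.0, 3.0], [2.0, -1.0, 0.0]]   # augmented matrix
factor = m[1][0] / m[0][0]
m[1] = [a - factor * b for a, b in zip(m[1], m[0])]
y = m[1][2] / m[1][1]
x = (m[0][2] - m[0][1] * y) / m[0][0]
assert (round(x, 9), round(y, 9)) == (1.0, 2.0)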
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 5_0))
    def get_scheduler_config( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
        config = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
    def full_loop( self: int ,**__lowerCAmelCase: Optional[Any] ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**__lowerCAmelCase )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ,eta ).prev_sample
        return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
            self.check_over_forward(time_step=t ,num_inference_steps=num_inference_steps )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t ,eta=eta )
def _lowercase ( self: Dict ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
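# stack three perturbed copies of the sample into a single batch, with matching per-sample timesteps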
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 46 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase_( ) -> List[Any]:
'''Parse the launcher's arguments: --num_cores plus the training script path and its own arguments.'''
_lowerCamelCase : Optional[Any] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCamelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=_lowerCamelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCamelCase )
return parser.parse_args()
def lowerCamelCase_( ) -> List[Any]:
'''Import the training script as a module, patch sys.argv, and spawn one process per TPU core.'''
_lowerCamelCase : Tuple = parse_args()
# Import training_script as a module.
_lowerCamelCase : str = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCamelCase : Any = script_fpath.stem
_lowerCamelCase : str = importlib.import_module(_lowerCamelCase )
# Patch sys.argv
_lowerCamelCase : Dict = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
| 46 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCamelCase_( ) -> None:
'''Generate a 1024-bit RSA key pair and write it to "rsa_pubkey.txt" and "rsa_privkey.txt".'''
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
'''Generate an RSA key pair from two key_size-bit primes: public key (n, e) and private key (n, d).'''
print("Generating prime p..." )
_lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
_lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCamelCase : Dict = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCamelCase : Dict = (n, e)
_lowerCamelCase : Dict = (n, d)
return (public_key, private_key)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None:
'''Write {name}_pubkey.txt and {name}_privkey.txt, aborting rather than overwriting existing key files.'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
_lowerCamelCase, _lowerCamelCase : Dict = generate_key(_lowerCamelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class A_ ( _a ):
lowerCAmelCase__ = 'vivit'
def __init__( self: List[Any] ,__lowerCAmelCase: int=224 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: str=[2, 16, 16] ,__lowerCAmelCase: Optional[Any]=3 ,__lowerCAmelCase: List[str]=768 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: Optional[int]=12 ,__lowerCAmelCase: Optional[Any]=3_072 ,__lowerCAmelCase: Any="gelu_fast" ,__lowerCAmelCase: Tuple=0.0 ,__lowerCAmelCase: Any=0.0 ,__lowerCAmelCase: Union[str, Any]=0.02 ,__lowerCAmelCase: List[str]=1e-06 ,__lowerCAmelCase: Optional[Any]=True ,**__lowerCAmelCase: Optional[int] ,):
'''simple docstring'''
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = num_frames
_lowerCamelCase : Optional[int] = tubelet_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : List[str] = qkv_bias
super().__init__(**__lowerCAmelCase )
| 46 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = XLMRobertaTokenizer
lowerCAmelCase__ = XLMRobertaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : Optional[int] = XLMRobertaTokenizer(__lowerCAmelCase ,keep_accents=__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = "<pad>"
_lowerCamelCase : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-1] ,"<mask>" )
self.assertEqual(len(__lowerCAmelCase ) ,1_002 )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1_002 )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = XLMRobertaTokenizer(__lowerCAmelCase ,keep_accents=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCAmelCase ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
_lowerCamelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
_lowerCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
_lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
def _lowercase ( self: Dict ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCamelCase : List[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : str = self.tokenizer_class.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = tempfile.mkdtemp()
_lowerCamelCase : Tuple = tokenizer_r.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_lowerCamelCase : List[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCAmelCase ,__lowerCAmelCase )
# Checks everything loads correctly in the same way
_lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase ,__lowerCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
_lowerCamelCase : List[str] = tempfile.mkdtemp()
_lowerCamelCase : Optional[int] = tokenizer_r.save_pretrained(__lowerCAmelCase ,legacy_format=__lowerCAmelCase )
_lowerCamelCase : Any = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase ,__lowerCAmelCase )
# Checks everything loads correctly in the same way
_lowerCamelCase : Optional[Any] = tokenizer_r.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase ,__lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCamelCase : List[str] = tokenizer_r.save_pretrained(__lowerCAmelCase ,legacy_format=__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase : Any = tokenizer_r.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase ,__lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
@cached_property
def _lowercase ( self: List[str] ):
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def _lowercase ( self: int ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowerCAmelCase ,f.name )
_lowerCamelCase : List[Any] = XLMRobertaTokenizer(f.name ,keep_accents=__lowerCAmelCase )
_lowerCamelCase : List[Any] = pickle.dumps(__lowerCAmelCase )
pickle.loads(__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer()
_lowerCamelCase : Optional[Any] = "I was born in 92000, and this is falsé."
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Tuple = self.get_rust_tokenizer()
_lowerCamelCase : List[str] = tokenizer.encode(__lowerCAmelCase )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "Hello World!"
_lowerCamelCase : Optional[Any] = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase ,self.big_tokenizer.encode(__lowerCAmelCase ) )
@slow
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_lowerCamelCase : Any = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__lowerCAmelCase ,self.big_tokenizer.encode(__lowerCAmelCase ) )
@slow
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = {"input_ids": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="xlm-roberta-base" ,revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" ,)
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = MgpstrTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = {}
lowerCAmelCase__ = False
def _lowercase ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
_lowerCamelCase : List[Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "tester"
_lowerCamelCase : Optional[Any] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_lowerCamelCase : Optional[Any] = tokenizer.encode([special_token] ,add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) ,1 )
_lowerCamelCase : int = tokenizer.decode(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase, _lowerCamelCase : List[Any] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertNotEqual(len(__lowerCAmelCase ) ,0 )
_lowerCamelCase : Optional[int] = tokenizer.decode(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(text_a.replace(" " ,"" ) ,__lowerCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
| 46 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A_ :
lowerCAmelCase__ = None
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCamelCase : str = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] ,__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Any = os.path.join(__lowerCAmelCase ,"feat_extract.json" )
feat_extract_first.to_json_file(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.feature_extraction_class.from_json_file(__lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() ,feat_extract_first.to_dict() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : int = feat_extract_first.save_pretrained(__lowerCAmelCase )[0]
check_json_file_has_correct_format(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(__lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() ,feat_extract_first.to_dict() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.feature_extraction_class()
self.assertIsNotNone(__lowerCAmelCase )
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) -> Tuple:
'''Scale (height, width) for the movq latent grid: each dimension becomes ceil(dim / scale_factor**2) * scale_factor.'''
_lowerCamelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=512 , _lowerCamelCase=512 ) -> int:
'''Resize a PIL image to (w, h), rescale its RGB values to [-1, 1], and return a 1 x C x H x W float tensor.'''
_lowerCamelCase : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
_lowerCamelCase : Union[str, Any] = np.array(pil_image.convert("RGB" ) )
_lowerCamelCase : Any = arr.astype(np.floataa ) / 1_2_7.5 - 1
_lowerCamelCase : Optional[Any] = np.transpose(_lowerCamelCase , [2, 0, 1] )
_lowerCamelCase : Any = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
'''For img2img, keep only the last strength * num_inference_steps timesteps of the denoising schedule.'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
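# guidance_scale > 1 enables classifier-free guidance, which doubles the effective batch below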
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
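# mix the unconditional and text-conditioned noise with the guidance scale; keep the text-branch variance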
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 46 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[Any]=13 ,__lowerCAmelCase: Tuple=7 ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[Any]=99 ,__lowerCAmelCase: Optional[int]=32 ,__lowerCAmelCase: Dict=5 ,__lowerCAmelCase: Any=4 ,__lowerCAmelCase: int=37 ,__lowerCAmelCase: Tuple="gelu" ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Union[str, Any]=512 ,__lowerCAmelCase: Optional[int]=16 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Tuple=4 ,__lowerCAmelCase: List[str]=None ,):
'''simple docstring'''
_lowerCamelCase : int = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : Any = use_input_mask
_lowerCamelCase : Tuple = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : Dict = type_sequence_label_size
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = num_choices
_lowerCamelCase : Any = scope
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCamelCase : List[Any] = None
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCamelCase : Dict = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCamelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self: Any ):
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,)
def _lowercase ( self: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Dict = NystromformerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ,token_type_ids=__lowerCAmelCase )
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = NystromformerForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = NystromformerForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,start_positions=__lowerCAmelCase ,end_positions=__lowerCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.num_labels
_lowerCamelCase : int = NystromformerForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: str ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : int = NystromformerForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self: str ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.num_choices
_lowerCamelCase : List[Any] = NystromformerForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
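# tile every input to (batch_size, num_choices, seq_length) for the multiple-choice head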
_lowerCamelCase : int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Optional[Any] = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
),
) : str = config_and_inputs
_lowerCamelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = NystromformerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = NystromformerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : str = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_lowerCamelCase : List[str] = model(__lowerCAmelCase )[0]
_lowerCamelCase : Dict = torch.Size((1, 6, 768) )
self.assertEqual(output.shape ,__lowerCAmelCase )
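# spot-check the first 3x3 block of the hidden states against reference values from the checkpoint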
_lowerCamelCase : Optional[Any] = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
@slow
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "the [MASK] of Belgium is Brussels"
_lowerCamelCase : str = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : List[Any] = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : Optional[int] = tokenizer(__lowerCAmelCase ,return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : Dict = model(encoding.input_ids ).logits
_lowerCamelCase : Tuple = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) ,"capital" )
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCamelCase_( ) -> None:
'''Generate a 1024-bit RSA key pair and write it to "rsa_pubkey.txt" and "rsa_privkey.txt".'''
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def lowerCamelCase_( _lowerCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
'''Generate an RSA key pair from two key_size-bit primes: public key (n, e) and private key (n, d).'''
print("Generating prime p..." )
_lowerCamelCase : List[str] = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
_lowerCamelCase : Tuple = rabinMiller.generate_large_prime(_lowerCamelCase )
_lowerCamelCase : Dict = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_lowerCamelCase : Tuple = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_lowerCamelCase : str = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
_lowerCamelCase : Dict = (n, e)
_lowerCamelCase : Dict = (n, d)
return (public_key, private_key)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> None:
'''Write {name}_pubkey.txt and {name}_privkey.txt, aborting rather than overwriting existing key files.'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
_lowerCamelCase, _lowerCamelCase : Dict = generate_key(_lowerCamelCase )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str | Literal[False]:
'''If the two bit strings differ in exactly one position, return their merge with that position set to "_"; otherwise return False.'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Any = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count += 1
_lowerCamelCase : List[str] = "_"
if count > 1:
return False
else:
return "".join(_lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> list[str]:
'''Repeatedly merge implicants that differ in a single bit; the strings that never merge are the prime implicants.'''
_lowerCamelCase : List[str] = []
while True:
_lowerCamelCase : Tuple = ["$"] * len(_lowerCamelCase )
_lowerCamelCase : str = []
for i in range(len(_lowerCamelCase ) ):
for j in range(i + 1 , len(_lowerCamelCase ) ):
_lowerCamelCase : Dict = compare_string(binary[i] , binary[j] )
if k is False:
_lowerCamelCase : Any = "*"
_lowerCamelCase : Optional[int] = "*"
temp.append("X" )
for i in range(len(_lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCamelCase ) == 0:
return pi
_lowerCamelCase : List[Any] = list(set(_lowerCamelCase ) )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''Convert each minterm to a fixed-width binary string of no_of_variable bits.'''
_lowerCamelCase : Optional[int] = []
for minterm in minterms:
_lowerCamelCase : List[Any] = ""
for _ in range(_lowerCamelCase ):
_lowerCamelCase : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCamelCase )
return temp
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
'''Return True if implicant string1 covers minterm string2, i.e. they differ only at string1's "_" positions.'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Optional[int] = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''Pick the essential prime implicants from the coverage chart, then greedily cover the remaining minterms.'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = [0] * len(_lowerCamelCase )
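# first pass: a minterm column covered by exactly one implicant marks that implicant as essential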
for i in range(len(chart[0] ) ):
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Any = j
if count == 1:
_lowerCamelCase : Union[str, Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : Any = count_n
_lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Any = 0
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : str = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = 1
return chart
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : str = [
        int(_lowerCamelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Tuple = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
_lowerCamelCase : Any = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
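# A minimal, de-obfuscated sketch of the Quine-McCluskey merge rule the code
# above relies on (names here are illustrative, not the ones in this file):
# two implicants combine only when they differ in exactly one bit, and that
# bit becomes "_" in the merged pattern.
def merge_implicants(a: str, b: str):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None  # zero or several differing bits: no merge
    i = diff[0]
    return a[:i] + "_" + a[i + 1 :]

assert merge_implicants("0010", "0110") == "0_10"
assert merge_implicants("0110", "1101") is None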
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Any = outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
_lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
        _lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
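# Worked arithmetic for the masked sequence length used by the tester above,
# with its own defaults (image_size=30, patch_size=2, mask_ratio=0.6):
# (30 // 2) ** 2 = 225 patches, plus 1 for the [CLS] token, of which the
# encoder keeps ceil((1 - 0.6) * 226) = ceil(90.4) = 91 tokens.
import math

assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91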
| 46 | 1 |
"""simple docstring"""
from math import sqrt
def lowerCamelCase_( _lowerCamelCase = 1000000 ) -> int:
'''simple docstring'''
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
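# Worked instance of the integrality test above: for a 6 x 5 x 3 cuboid the
# shortest surface path is sqrt((5 + 3) ** 2 + 6 ** 2) = sqrt(100) = 10, an
# integer, so that cuboid counts (the loop groups cuboids by the sum of
# their two shortest sides).
from math import sqrt

assert sqrt((5 + 3) ** 2 + 6 ** 2).is_integer()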
| 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_lowerCAmelCase : List[str] = 10
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
for i in range(_lowerCamelCase , _lowerCamelCase ):
if array[i] == target:
return i
return -1
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Any = len(_lowerCamelCase )
while left <= right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = (left + right) // 3 + 1
_lowerCamelCase : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
_lowerCamelCase : Union[str, Any] = one_third - 1
elif array[two_third] < target:
_lowerCamelCase : Any = two_third + 1
else:
_lowerCamelCase : List[str] = one_third + 1
_lowerCamelCase : int = two_third - 1
else:
return -1
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Tuple = (left + right) // 3 + 1
_lowerCamelCase : Optional[Any] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_lowerCamelCase , one_third - 1 , _lowerCamelCase , _lowerCamelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _lowerCamelCase , _lowerCamelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
_lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowerCAmelCase : Any = int(input('''Enter the number to be found in the list:\n''').strip())
_lowerCAmelCase : Union[str, Any] = ite_ternary_search(collection, target)
_lowerCAmelCase : str = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
    print(f'''Recursive search: {target} found at positions: {resultb}''')
else:
print('''Not found''')
| 46 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCAmelCase : str = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
_lowerCAmelCase : List[Any] = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
_lowerCAmelCase : Union[str, Any] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class A_ ( _a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self: List[str] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Optional[int]="<s>" ,__lowerCAmelCase: List[Any]="</s>" ,__lowerCAmelCase: List[str]="</s>" ,__lowerCAmelCase: Optional[Any]="<s>" ,__lowerCAmelCase: List[str]="<unk>" ,__lowerCAmelCase: int="<pad>" ,__lowerCAmelCase: List[Any]="<mask>" ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: str ,):
'''simple docstring'''
_lowerCamelCase : int = AddedToken(__lowerCAmelCase ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else mask_token
super().__init__(
vocab_file=__lowerCAmelCase ,tokenizer_file=__lowerCAmelCase ,bos_token=__lowerCAmelCase ,eos_token=__lowerCAmelCase ,sep_token=__lowerCAmelCase ,cls_token=__lowerCAmelCase ,unk_token=__lowerCAmelCase ,pad_token=__lowerCAmelCase ,mask_token=__lowerCAmelCase ,src_lang=__lowerCAmelCase ,tgt_lang=__lowerCAmelCase ,additional_special_tokens=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Any = vocab_file
_lowerCamelCase : Dict = False if not self.vocab_file else True
_lowerCamelCase : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
_lowerCamelCase : Optional[int] = {
lang_code: self.convert_tokens_to_ids(__lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCamelCase : Any = src_lang if src_lang is not None else "en_XX"
_lowerCamelCase : Optional[int] = self.convert_tokens_to_ids(self._src_lang )
_lowerCamelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _lowercase ( self: Any ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self: Any ,__lowerCAmelCase: List[int] ,__lowerCAmelCase: Optional[List[int]] = None ):
'''simple docstring'''
_lowerCamelCase : List[str] = [self.sep_token_id]
_lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] ,__lowerCAmelCase: Optional[str] ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
_lowerCamelCase : Tuple = src_lang
_lowerCamelCase : Optional[int] = self(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[str] = self.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tgt_lang_id
return inputs
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: str = "en_XX" ,__lowerCAmelCase: Optional[List[str]] = None ,__lowerCAmelCase: str = "ro_RO" ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
_lowerCamelCase : List[Any] = src_lang
_lowerCamelCase : List[str] = tgt_lang
return super().prepare_seqaseq_batch(__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Tuple ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self: List[str] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : Any = []
_lowerCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
_lowerCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCamelCase : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCamelCase : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : int = self.convert_tokens_to_ids(__lowerCAmelCase )
_lowerCamelCase : Tuple = []
_lowerCamelCase : Tuple = [self.eos_token_id, self.cur_lang_code]
_lowerCamelCase : str = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCamelCase : Any = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCamelCase : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str ,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def _lowercase ( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_lowerCamelCase : List[str] = os.path.join(
__lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ):
copyfile(self.vocab_file ,__lowerCAmelCase )
return (out_vocab_file,)
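# Resulting sequence layout, per the special-token setters above: the language
# code goes after the end-of-sentence token and the prefix stays empty, so an
# en_XX source sentence is encoded as
#   [tok_1, ..., tok_n, </s>, en_XX]
# and a ro_RO target the same way with ro_RO in place of en_XX.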
| 46 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 100 ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = set()
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Optional[int] = n + 1 # maximum limit
for a in range(2 , _lowerCamelCase ):
for b in range(2 , _lowerCamelCase ):
_lowerCamelCase : List[str] = a**b # calculates the current power
collect_powers.add(_lowerCamelCase ) # adds the result to the set
return len(_lowerCamelCase )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
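# Why a set is needed above: distinct (a, b) pairs can produce the same power,
# e.g. 2**4 == 4**2 == 16. Tiny check for n = 5 (a and b both in 2..5): the 16
# products collapse to 15 distinct values because of exactly that collision.
assert len({a**b for a in range(2, 6) for b in range(2, 6)}) == 15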
| 46 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(_lowerCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_lowerCamelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_lowerCamelCase ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class A_ ( _a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self: Dict ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Union[int, float] = 1 / 255 ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,**__lowerCAmelCase: str ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : List[str] = size if size is not None else {"shortest_edge": 224}
_lowerCamelCase : str = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : Dict = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowerCamelCase : List[Any] = get_size_dict(__lowerCAmelCase ,param_name="crop_size" )
_lowerCamelCase : Union[str, Any] = do_resize
_lowerCamelCase : List[str] = size
_lowerCamelCase : Dict = do_center_crop
_lowerCamelCase : Dict = crop_size
_lowerCamelCase : Any = resample
_lowerCamelCase : Any = do_rescale
_lowerCamelCase : Optional[Any] = rescale_factor
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self: Dict ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Dict[str, int] ,__lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
_lowerCamelCase : List[str] = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
if "shortest_edge" in size:
_lowerCamelCase : Dict = get_resize_output_image_size(__lowerCAmelCase ,size["shortest_edge"] ,default_to_square=__lowerCAmelCase )
elif "height" in size and "width" in size:
_lowerCamelCase : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(__lowerCAmelCase ,size=__lowerCAmelCase ,resample=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Dict ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Dict[str, int] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
_lowerCamelCase : str = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(__lowerCAmelCase ,size=(size["height"], size["width"]) ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: List[str] ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[int, float] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: Optional[Any] ,):
'''simple docstring'''
return rescale(__lowerCAmelCase ,scale=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Dict ,__lowerCAmelCase: np.ndarray ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Union[float, List[float]] ,__lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
return normalize(__lowerCAmelCase ,mean=__lowerCAmelCase ,std=__lowerCAmelCase ,data_format=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: ImageInput ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: PILImageResampling = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: float = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[ChannelDimension] = ChannelDimension.FIRST ,):
'''simple docstring'''
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowerCamelCase : int = to_numpy_array(__lowerCAmelCase )
if do_resize:
_lowerCamelCase : Dict = self.resize(image=__lowerCAmelCase ,size=__lowerCAmelCase ,resample=__lowerCAmelCase )
if do_center_crop:
_lowerCamelCase : Any = self.center_crop(__lowerCAmelCase ,size=__lowerCAmelCase )
if do_rescale:
_lowerCamelCase : Optional[Any] = self.rescale(image=__lowerCAmelCase ,scale=__lowerCAmelCase )
if do_normalize:
_lowerCamelCase : Optional[int] = self.normalize(image=__lowerCAmelCase ,mean=__lowerCAmelCase ,std=__lowerCAmelCase )
_lowerCamelCase : Tuple = to_channel_dimension_format(__lowerCAmelCase ,__lowerCAmelCase )
return image
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: ImageInput ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: PILImageResampling = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: Dict[str, int] = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: float = None ,__lowerCAmelCase: bool = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[float, List[float]]] = None ,__lowerCAmelCase: Optional[Union[str, TensorType]] = None ,__lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST ,**__lowerCAmelCase: str ,):
'''simple docstring'''
_lowerCamelCase : Dict = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
_lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Any = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : List[Any] = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : Union[str, Any] = get_size_dict(__lowerCAmelCase ,default_to_square=__lowerCAmelCase )
_lowerCamelCase : int = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Any = get_size_dict(__lowerCAmelCase ,param_name="crop_size" )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
_lowerCamelCase : List[Any] = make_batched(__lowerCAmelCase )
_lowerCamelCase : str = [
[
self._preprocess_image(
image=__lowerCAmelCase ,do_resize=__lowerCAmelCase ,size=__lowerCAmelCase ,resample=__lowerCAmelCase ,do_center_crop=__lowerCAmelCase ,crop_size=__lowerCAmelCase ,do_rescale=__lowerCAmelCase ,rescale_factor=__lowerCAmelCase ,do_normalize=__lowerCAmelCase ,image_mean=__lowerCAmelCase ,image_std=__lowerCAmelCase ,data_format=__lowerCAmelCase ,)
for img in video
]
for video in videos
]
_lowerCamelCase : Union[str, Any] = {"pixel_values": videos}
return BatchFeature(data=__lowerCAmelCase ,tensor_type=__lowerCAmelCase )
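# A minimal sketch of the "shortest_edge" resize rule used above: scale so the
# shorter side hits the target while preserving aspect ratio (illustrative
# helper, not the transformers implementation).
def shortest_edge_output_size(height: int, width: int, target: int) -> tuple:
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * target / short)
    return (target, new_long) if height <= width else (new_long, target)

assert shortest_edge_output_size(480, 640, 224) == (224, 298)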
| 46 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
lowerCAmelCase__ = 'esm'
def __init__( self: str ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Any=12 ,__lowerCAmelCase: str=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: List[Any]=1_026 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Dict="absolute" ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Union[str, Any]=False ,__lowerCAmelCase: str=False ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,mask_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : int = max_position_embeddings
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : str = use_cache
_lowerCamelCase : Union[str, Any] = emb_layer_norm_before
_lowerCamelCase : Tuple = token_dropout
_lowerCamelCase : Dict = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
_lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = EsmFoldConfig(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
_lowerCamelCase : List[str] = get_default_vocab_list()
else:
_lowerCamelCase : Optional[Any] = vocab_list
else:
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,__lowerCAmelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = super().to_dict()
if isinstance(self.esmfold_config ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.trunk is None:
_lowerCamelCase : Optional[int] = TrunkConfig()
elif isinstance(self.trunk ,__lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = TrunkConfig(**self.trunk )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 4_8
lowerCAmelCase__ = 1_0_2_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
def _lowercase ( self: Any ):
'''simple docstring'''
if self.structure_module is None:
_lowerCamelCase : Tuple = StructureModuleConfig()
elif isinstance(self.structure_module ,__lowerCAmelCase ):
_lowerCamelCase : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
_lowerCamelCase : Optional[Any] = self.sequence_state_dim // self.sequence_head_width
_lowerCamelCase : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = asdict(self )
_lowerCamelCase : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class A_ :
lowerCAmelCase__ = 3_8_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 1E-8
lowerCAmelCase__ = 1E5
def _lowercase ( self: Any ):
'''simple docstring'''
return asdict(self )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 46 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : List[Any] = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 |
"""simple docstring"""
import re
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
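# The same translation-table idea in one line (illustrative): "ATCG" maps
# position-wise onto "TAGC", so every base swaps for its complement.
assert "GTAAACCC".translate(str.maketrans("ATCG", "TAGC")) == "CATTTGGG"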
| 46 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
_lowerCamelCase : Any = emb.weight.data
return lin_layer
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=None ) -> Any:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = {}
for old_key in state_dict.keys():
_lowerCamelCase : List[str] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_lowerCamelCase : Any = key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" )
else:
_lowerCamelCase : Optional[Any] = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
_lowerCamelCase : int = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
_lowerCamelCase : Tuple = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
_lowerCamelCase : Union[str, Any] = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
_lowerCamelCase : Union[str, Any] = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
_lowerCamelCase : int = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
_lowerCamelCase : Tuple = key.replace("final_layer_norm" , "ff_layer_norm" )
_lowerCamelCase : List[str] = state_dict[old_key]
return new_dict
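# Examples of the mapping above on illustrative fairseq keys: an expert weight
# "decoder.layers.3.moe_layer.experts.0.fc1.weight" (with expert_idx=0) becomes
# "decoder.layers.3.ffn.experts.expert_0.fc1.weight", and its fc1 part is left
# alone because "experts" is in the key; a dense "encoder.layers.0.fc1.weight"
# becomes "encoder.layers.0.ffn.fc1.weight".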
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : List[str] = []
_lowerCamelCase : List[Any] = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
_lowerCamelCase : Dict = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(_lowerCamelCase ):
_lowerCamelCase : List[str] = torch.load(_lowerCamelCase )["model"]
remove_ignore_keys_(_lowerCamelCase )
_lowerCamelCase : Any = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Tuple = os.path.join(
_lowerCamelCase , weights_name.replace(".bin" , F"""-{len(_lowerCamelCase )+1:05d}-of-???.bin""" ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCamelCase , weights_name.replace(".bin" , F"""-{len(_lowerCamelCase )+1:05d}-of-???.bin""" ) )
_lowerCamelCase : List[str] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(_lowerCamelCase )
_lowerCamelCase : int = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
_lowerCamelCase : Optional[int] = {}
for idx, shard in enumerate(_lowerCamelCase ):
_lowerCamelCase : int = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin""" )
_lowerCamelCase : str = os.path.join(_lowerCamelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
_lowerCamelCase : Optional[Any] = shard_file
# Add the metadata
_lowerCamelCase : Union[str, Any] = {"total_size": total_size}
_lowerCamelCase : Optional[Any] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + "\n"
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowerCAmelCase : List[str] = parser.parse_args()
_lowerCAmelCase , _lowerCAmelCase : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCAmelCase : List[Any] = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCAmelCase : Union[str, Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : Tuple = ""
else:
_lowerCamelCase : str = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Tuple = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : Any = dct.pop(_lowerCamelCase )
_lowerCamelCase : Dict = val
def lowerCamelCase_( ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint(model_name , pytorch_dump_folder_path , base_model=True ):
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main" , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 46 | 1 |
"""simple docstring"""
def partition(m: int ) -> int:
    '''simple docstring'''
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
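# Example (illustrative, assuming this computes the integer partition function p(m)):
# partition(5) -> 7, the partitions being 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and
# 1+1+1+1+1.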
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 46 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs ):
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
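# Quick check (illustrative): subtracting the row-wise max keeps np.exp from
# overflowing on large logits without changing the result, e.g.
#   softmax(np.array([[1000.0, 1000.0]])) -> array([[0.5, 0.5]])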
class ClassificationFunction(ExplicitEnum ):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `"default"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `"sigmoid"`: Applies the sigmoid function on the output.\n            - `"softmax"`: Applies the softmax function on the output.\n            - `"none"`: Does not apply any function on the output.\n    ' , )
class TextClassificationPipeline(Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__( self ,**kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self ,return_all_scores=None ,function_to_apply=None ,top_k="" ,**tokenizer_kwargs ):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k ,int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,UserWarning ,)
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply ,str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self ,*args ,**kwargs ):
        '''simple docstring'''
        result = super().__call__(*args ,**kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0] ,str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self ,inputs ,**tokenizer_kwargs ):
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs ,dict ):
            return self.tokenizer(**inputs ,return_tensors=return_tensors ,**tokenizer_kwargs )
        elif isinstance(inputs ,list ) and len(inputs ) == 1 and isinstance(inputs[0] ,list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=return_tensors ,**tokenizer_kwargs )
        elif isinstance(inputs ,list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs ,return_tensors=return_tensors ,**tokenizer_kwargs )
    def _forward( self ,model_inputs ):
        '''simple docstring'''
        return self.model(**model_inputs )
    def postprocess( self ,model_outputs ,function_to_apply=None ,top_k=1 ,_legacy=True ):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] ,reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
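# Usage sketch (illustrative; any standard text-classification checkpoint works):
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("I love this movie!")  # -> [{'label': 'POSITIVE', 'score': 0.99...}]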
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict , start: str ) -> set[str]:
    '''simple docstring'''
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
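# Example (illustrative): with the graph G defined below, depth_first_search(G, "A")
# returns the set of all vertices reachable from "A", here all seven of them:
# {"A", "B", "C", "D", "E", "F", "G"}.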
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''
FILE_PATH = '''file'''
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    '''simple docstring'''
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file ):
    '''simple docstring'''
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path ):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file ):
    '''simple docstring'''
    output_file = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline():
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline(tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline(tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline(tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 46 | 1 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k , patterns ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
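# Worked example (illustrative): applying DECODER_PATTERNS rewrites a TF variable
# name step by step, e.g.
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"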
def convert_bigbird_pegasus(tf_weights: dict , config_update: dict ) -> BigBirdPegasusForConditionalGeneration:
    '''simple docstring'''
    config = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(config )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
    for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight" )
    missing, extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path ) -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars , desc="converting tf checkpoint to dict" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str , save_dir: str , config_update: dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 46 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
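# Usage sketch via python-fire (the file name below is hypothetical):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin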
| 46 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="https://github.com/krishnap25/mauve" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("string" ,id="sequence" ),
"references": datasets.Value("string" ,id="sequence" ),
} ) ,codebase_urls=["https://github.com/krishnap25/mauve"] ,reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] ,)
    def _compute( self ,predictions ,references ,p_features=None ,q_features=None ,p_tokens=None ,q_tokens=None ,num_buckets="auto" ,pca_max_data=-1 ,kmeans_explained_var=0.9 ,kmeans_num_redo=5 ,kmeans_max_iter=500 ,featurize_model_name="gpt2-large" ,device_id=-1 ,max_text_length=1_024 ,divergence_curve_discretization_size=25 ,mauve_scaling_factor=5 ,verbose=True ,seed=25 ,):
        '''simple docstring'''
        out = compute_mauve(
            p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
        return out
| 46 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
    def _lowercase ( self: List[Any] ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained("bert-base-uncased" )
        self.assertIsInstance(config ,BertConfig )
    def _lowercase ( self: Dict ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG )
        self.assertIsInstance(config ,RobertaConfig )
    def _lowercase ( self: Any ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(config ,RobertaConfig )
    def _lowercase ( self: List[str] ):
        '''simple docstring'''
        config = AutoConfig.for_model("roberta" )
        self.assertIsInstance(config ,RobertaConfig )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir ,"fake-roberta" )
            os.makedirs(folder ,exist_ok=True )
            with open(os.path.join(folder ,"config.json" ) ,"w" ) as f:
                f.write(json.dumps({} ) )
            config = AutoConfig.from_pretrained(folder )
            self.assertEqual(type(config ) ,RobertaConfig )
def _lowercase ( self: Dict ):
'''simple docstring'''
        try:
            AutoConfig.register("custom" ,CustomConfig )
            # Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("model" ,CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("bert" ,BertConfig )
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config ,CustomConfig )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self: Dict ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,"bert-base is not a local folder and is not a valid model identifier" ):
            config = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self: Dict ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision="aaaaaa" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError ,"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." ,):
            config = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=False )
        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=True )
        self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir ,trust_remote_code=True )
self.assertEqual(reloaded_config.__class__.__name__ ,"NewModelConfig" )
def _lowercase ( self: Dict ):
'''simple docstring'''
        class NewModelConfigLocal(BertConfig ):
            model_type = 'new-model'
        try:
            AutoConfig.register("new-model" ,NewModelConfigLocal )
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfigLocal" )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" ,trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ ,"NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 46 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
sample_size=32 ,layers_per_block=1 ,block_out_channels=[32, 64] ,down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] ,mid_block_type="UNetMidBlock2DSimpleCrossAttn" ,up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] ,in_channels=3 ,out_channels=6 ,cross_attention_dim=32 ,encoder_hid_dim=32 ,attention_head_dim=8 ,addition_embed_type="text" ,addition_embed_type_num_heads=2 ,cross_attention_norm="group_norm" ,resnet_time_scale_shift="scale_shift" ,act_fn="gelu" ,)
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 ,beta_schedule="squaredcos_cap_v2" ,beta_start=0.0001 ,beta_end=0.02 ,thresholding=True ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type="epsilon" ,variance_type="learned_range" ,)
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
sample_size=32 ,layers_per_block=[1, 2] ,block_out_channels=[32, 64] ,down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] ,mid_block_type="UNetMidBlock2DSimpleCrossAttn" ,up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] ,in_channels=6 ,out_channels=6 ,cross_attention_dim=32 ,encoder_hid_dim=32 ,attention_head_dim=8 ,addition_embed_type="text" ,addition_embed_type_num_heads=2 ,cross_attention_norm="group_norm" ,resnet_time_scale_shift="scale_shift" ,act_fn="gelu" ,class_embed_type="timestep" ,mid_block_scale_factor=1.4_14 ,time_embedding_act_fn="gelu" ,time_embedding_dim=32 ,)
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 ,beta_schedule="squaredcos_cap_v2" ,beta_start=0.0001 ,beta_end=0.02 ,thresholding=True ,dynamic_thresholding_ratio=0.95 ,sample_max_value=1.0 ,prediction_type="epsilon" ,variance_type="learned_range" ,)
torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
num_train_timesteps=1_000 ,beta_schedule="squaredcos_cap_v2" ,beta_start=0.00_01 ,beta_end=0.02 ,)
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _lowercase ( self: Any ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe ,optional_component ,None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded ,optional_component ) is None ,F"""`{optional_component}` did not stay set to None after loading.""" ,)
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff ,1e-4 )
    def _lowercase ( self: str ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff ,1e-4 )
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list , low: int , mid: int , high: int ) -> list:
    '''simple docstring'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list ) -> list:
    '''simple docstring'''
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
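# Example (illustrative): iter_merge_sort([4, 1, 3, 2]) -> [1, 2, 3, 4]; any items
# comparable with `<=` work, since merge() only uses that operator.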
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(''',''')]
print(iter_merge_sort(unsorted))
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys(config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict: dict ) -> None:
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct: dict , old: str , new: str ) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError("Unknown model type" )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["state_dict"]
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1 , text , return_tensors="pt" )
        encoding_2 = processor(image2 , text , return_tensors="pt" )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=True ).raw )
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image , text , return_tensors="pt" )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1e-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
return len(set(_lowerCamelCase ) ) == len(_lowerCamelCase )
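# Example (illustrative): the check returns True for "abc" (all characters distinct)
# and False for "abca" (the set collapses duplicates, shortening its length).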
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str , string2: str ) -> str | Literal[False]:
    '''simple docstring'''
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1 )
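# Example (illustrative): compare_string("0110", "0100") -> "01_0" (the single
# differing bit becomes "_"), while compare_string("0110", "1001") -> False
# because more than one position differs.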
def check(binary: list[str] ) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        check1 = ["$"] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X" )
        for i in range(len(binary ) ):
            if check1[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary(no_of_variable: int , minterms: Sequence[float] ) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(_lowerCamelCase )
_lowerCamelCase : Optional[int] = list(_lowerCamelCase )
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
if lista[i] != listb[i]:
count_n += 1
return count_n == count
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = [0] * len(_lowerCamelCase )
for i in range(len(chart[0] ) ):
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = -1
for j in range(len(_lowerCamelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Any = j
if count == 1:
_lowerCamelCase : Union[str, Any] = 1
for i in range(len(_lowerCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Dict = 0
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Optional[int] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : Any = count_n
_lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCamelCase ) ):
_lowerCamelCase : Any = 0
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : str = [[0 for x in range(len(_lowerCamelCase ) )] for x in range(len(_lowerCamelCase ) )]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : List[Any] = prime_implicants[i].count("_" )
for j in range(len(_lowerCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCamelCase ):
_lowerCamelCase : Optional[Any] = 1
return chart
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : str = [
int(_lowerCamelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Tuple = decimal_to_binary(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : str = check(_lowerCamelCase )
print("Prime Implicants are:" )
print(_lowerCamelCase )
_lowerCamelCase : Any = prime_implicant_chart(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = selection(_lowerCamelCase , _lowerCamelCase )
print("Essential Prime Implicants are:" )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
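# Worked example (hand-checked, independent of the names above): minterms
# {0, 1, 3, 7} over 3 variables are 000, 001, 011, 111; one merge pass gives
# the prime implicants 00_, 0_1 and _11, and only 00_ covers minterm 0 while
# only _11 covers minterm 7, so those two are the essential prime implicants.
def _covers_sketch(implicant: str, minterm: str) -> bool:
    return all(p == "_" or p == m for p, m in zip(implicant, minterm))
# [p for p in ["00_", "0_1", "_11"] if _covers_sketch(p, "000")]  # ['00_']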
| 46 | 1 |
"""simple docstring"""
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = name
_lowerCamelCase : int = val
def __str__( self: str ):
'''simple docstring'''
return F"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self: Dict ,__lowerCAmelCase: Any ):
'''simple docstring'''
return self.val < other.val
class A_ :
def __init__( self: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Union[str, Any] = self.build_heap(__lowerCAmelCase )
def __getitem__( self: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
return self.get_value(__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
return (idx - 1) // 2
def _lowercase ( self: int ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
return idx * 2 + 1
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ):
'''simple docstring'''
return idx * 2 + 2
def _lowercase ( self: int ,__lowerCAmelCase: str ):
'''simple docstring'''
return self.heap_dict[key]
def _lowercase ( self: Any ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = len(__lowerCAmelCase ) - 1
_lowerCamelCase : List[str] = self.get_parent_idx(__lowerCAmelCase )
for idx, i in enumerate(__lowerCAmelCase ):
_lowerCamelCase : Dict = idx
_lowerCamelCase : Optional[int] = i.val
for i in range(__lowerCAmelCase ,-1 ,-1 ):
self.sift_down(__lowerCAmelCase ,__lowerCAmelCase )
return array
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
while True:
_lowerCamelCase : List[Any] = self.get_left_child_idx(__lowerCAmelCase ) # noqa: E741
_lowerCamelCase : Optional[int] = self.get_right_child_idx(__lowerCAmelCase )
_lowerCamelCase : List[str] = idx
if l < len(__lowerCAmelCase ) and array[l] < array[idx]:
_lowerCamelCase : Tuple = l
if r < len(__lowerCAmelCase ) and array[r] < array[smallest]:
_lowerCamelCase : List[Any] = r
if smallest != idx:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = array[smallest], array[idx]
_lowerCamelCase, _lowerCamelCase : Tuple = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_lowerCamelCase : Tuple = smallest
else:
break
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_parent_idx(__lowerCAmelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
_lowerCamelCase, _lowerCamelCase : Tuple = self.heap[idx], self.heap[p]
_lowerCamelCase, _lowerCamelCase : Tuple = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_lowerCamelCase : Tuple = p
_lowerCamelCase : Optional[Any] = self.get_parent_idx(__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return self.heap[0]
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.heap[-1], self.heap[0]
_lowerCamelCase, _lowerCamelCase : Optional[Any] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_lowerCamelCase : Optional[int] = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _lowercase ( self: Tuple ,__lowerCAmelCase: str ):
'''simple docstring'''
self.heap.append(__lowerCAmelCase )
_lowerCamelCase : str = len(self.heap ) - 1
_lowerCamelCase : str = node.val
self.sift_up(len(self.heap ) - 1 )
def _lowercase ( self: Any ):
'''simple docstring'''
return len(self.heap ) == 0
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_lowerCamelCase : List[str] = new_value
_lowerCamelCase : Union[str, Any] = new_value
self.sift_up(self.idx_of_element[node] )
_lowerCAmelCase : int = Node('''R''', -1)
_lowerCAmelCase : List[Any] = Node('''B''', 6)
_lowerCAmelCase : List[str] = Node('''A''', 3)
_lowerCAmelCase : Optional[int] = Node('''X''', 1)
_lowerCAmelCase : str = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_lowerCAmelCase : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
def __init__( self: List[str] ,__lowerCAmelCase: int | None = None ):
'''simple docstring'''
_lowerCamelCase : Any = value
_lowerCamelCase : Optional[int] = random()
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
def __repr__( self: Tuple ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = str(self.value ) + " "
_lowerCamelCase : Optional[Any] = str(self.left or "" )
_lowerCamelCase : int = str(self.right or "" )
return value + left + right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> tuple[Node | None, Node | None]:
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCamelCase, _lowerCamelCase : int = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCamelCase, _lowerCamelCase : Optional[int] = split(root.right , _lowerCamelCase )
return root, right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCamelCase : Any = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCamelCase : Optional[Any] = merge(_lowerCamelCase , right.left )
return right
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase : int = Node(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Tuple = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , value - 1 )
_lowerCamelCase, _lowerCamelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Node | None:
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCamelCase : Optional[Any] = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCamelCase : Optional[Any] = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def lowerCamelCase_( ) -> None:
'''simple docstring'''
_lowerCamelCase : List[Any] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
_lowerCamelCase : int = input()
while args != "q":
_lowerCamelCase : List[str] = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCamelCase : Tuple = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
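# Non-interactive sketch (assuming the upstream names insert/erase/inorder used
# by interact_treap above): priorities are random, but the inorder traversal is
# deterministic, so the printed order is stable across runs.
#
# root = None
# for value in [5, 3, 9, 1]:
#     root = insert(root, value)
# inorder(root)        # prints 1,3,5,9,
# root = erase(root, 3)
# inorder(root)        # prints 1,5,9,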
| 46 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Tuple = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_lowerCamelCase : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ,variant=__lowerCAmelCase ) )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Any = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_lowerCamelCase : str = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ,variant=__lowerCAmelCase ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_lowerCamelCase : Union[str, Any] = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ,variant=__lowerCAmelCase ) )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Union[str, Any] = "fp16"
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ,variant=__lowerCAmelCase ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_lowerCamelCase : Dict = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ,variant=__lowerCAmelCase ) )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_lowerCamelCase : List[Any] = "fp16"
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ,variant=__lowerCAmelCase ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Tuple = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# Removed: 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_lowerCamelCase : str = "fp16"
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ,variant=__lowerCAmelCase ) )
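# Quick sketch of the helper under test: a weight list is "compatible" when
# every torch .bin file has a .safetensors counterpart (per variant, if given).
#
# from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
# is_safetensors_compatible(
#     ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
# )  # True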
| 46 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def _lowercase ( self: List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCamelCase : str = SpeechTaTokenizer(__lowerCAmelCase )
_lowerCamelCase : Tuple = AddedToken("<mask>" ,lstrip=__lowerCAmelCase ,rstrip=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = "this is a test"
_lowerCamelCase : Optional[Any] = "this is a test"
return input_text, output_text
def _lowercase ( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Any=False ,__lowerCAmelCase: str=20 ,__lowerCAmelCase: List[Any]=5 ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.get_input_output_texts(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.decode(__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "<pad>"
_lowerCamelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(vocab_keys[-4] ,"œ" )
self.assertEqual(vocab_keys[-2] ,"<mask>" )
self.assertEqual(vocab_keys[-1] ,"<ctc_blank>" )
self.assertEqual(len(__lowerCAmelCase ) ,81 )
def _lowercase ( self: Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Optional[Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCamelCase : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
_lowerCamelCase : Any = tokenizer.add_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size + len(__lowerCAmelCase ) )
_lowerCamelCase : Any = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
_lowerCamelCase : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
_lowerCamelCase : str = tokenizer.add_special_tokens(__lowerCAmelCase )
_lowerCamelCase : int = tokenizer.vocab_size
_lowerCamelCase : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase ,0 )
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase ,len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase ,all_size_a + len(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
_lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# fmt: off
self.assertListEqual(__lowerCAmelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase ,[SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase ,model_name="microsoft/speecht5_asr" ,revision="c5ef64c71905caeccde0e4462ef3f9077224c524" ,sequences=__lowerCAmelCase ,)
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
if len(_lowerCamelCase ) <= 1 or n <= 1:
return
insert_next(_lowerCamelCase , n - 1 )
rec_insertion_sort(_lowerCamelCase , n - 1 )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
if index >= len(_lowerCamelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_lowerCamelCase, _lowerCamelCase : Dict = (
collection[index],
collection[index - 1],
)
insert_next(_lowerCamelCase , index + 1 )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = input('''Enter integers separated by spaces: ''')
_lowerCAmelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
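# A named, runnable sketch of the same recursion (the listing above depends on
# the upstream names rec_insertion_sort/insert_next; the names below are ours):
def _insert_next_sketch(collection: list, index: int) -> None:
    # Swap out-of-order neighbours, then continue the pass to the right.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    _insert_next_sketch(collection, index + 1)
def _rec_insertion_sort_sketch(collection: list, n: int) -> None:
    if len(collection) <= 1 or n <= 1:
        return
    _insert_next_sketch(collection, n - 1)
    _rec_insertion_sort_sketch(collection, n - 1)
# data = [5, 2, 9, 1]; _rec_insertion_sort_sketch(data, len(data)); data == [1, 2, 5, 9]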
| 46 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
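# Usage sketch: with the guards above, schedulers resolve to real classes when
# their backend is installed and to dummy placeholders otherwise, so the module
# never hard-fails at import time.
#
# from diffusers.schedulers import DDIMScheduler
# scheduler = DDIMScheduler(num_train_timesteps=1000)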
| 46 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['transformers', 'torch', 'note_seq']
def __init__( self: Union[str, Any] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
requires_backends(self ,["transformers", "torch", "note_seq"] )
@classmethod
def _lowercase ( cls: Any ,*__lowerCAmelCase: str ,**__lowerCAmelCase: Dict ):
'''simple docstring'''
requires_backends(cls ,["transformers", "torch", "note_seq"] )
@classmethod
def _lowercase ( cls: Dict ,*__lowerCAmelCase: int ,**__lowerCAmelCase: str ):
'''simple docstring'''
requires_backends(cls ,["transformers", "torch", "note_seq"] )
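# Behaviour sketch: constructing or loading the placeholder fails fast with an
# ImportError naming the missing backends rather than an opaque AttributeError.
#
# try:
#     A_()
# except ImportError as err:
#     print(err)  # message mentions transformers, torch and note_seq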
| 46 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (('eta', 0.0), ('num_inference_steps', 5_0))
def _lowercase ( self: List[str] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = 10, 0.0
_lowerCamelCase : List[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
_lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = scheduler.step(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ).prev_sample
return sample
def _lowercase ( self: List[str] ):
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Dict = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowercase ( self: Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase ,beta_end=__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase ,prediction_type=__lowerCAmelCase ,sample_max_value=__lowerCAmelCase ,)
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase ,num_inference_steps=__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase ,eta=__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : str = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[int] = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : List[str] = self.dummy_sample_deter + 0.1
_lowerCamelCase : Dict = self.dummy_sample_deter - 0.1
_lowerCamelCase : Union[str, Any] = samplea.shape[0]
_lowerCamelCase : List[Any] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_lowerCamelCase : Dict = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 ,__lowerCAmelCase )
_lowerCamelCase : str = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_lowerCamelCase : List[str] = scheduler.batch_step_no_noise(__lowerCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__lowerCAmelCase )
_lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Any = self.full_loop()
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=__lowerCAmelCase ,beta_start=0.01 )
_lowerCamelCase : int = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
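# Minimal denoising-loop sketch mirroring full_loop above (torch.zeros_like
# stands in for a real eps-predicting model; shapes are illustrative):
#
# scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8)
# for t in scheduler.timesteps:
#     residual = torch.zeros_like(sample)
#     sample = scheduler.step(residual, t, sample, 0.0).prev_sample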
| 46 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self: Any ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Union[str, Any]=7 ,__lowerCAmelCase: Tuple=3 ,__lowerCAmelCase: List[Any]=30 ,__lowerCAmelCase: Any=400 ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Any=0.9 ,__lowerCAmelCase: Optional[int]=None ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Tuple=[0.5, 0.5, 0.5] ,__lowerCAmelCase: Union[str, Any]=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCamelCase : List[str] = size if size is not None else {"shortest_edge": 30}
_lowerCamelCase : List[Any] = crop_size if crop_size is not None else {"height": 30, "width": 30}
_lowerCamelCase : List[str] = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : Optional[int] = min_resolution
_lowerCamelCase : Dict = max_resolution
_lowerCamelCase : Dict = do_resize_and_center_crop
_lowerCamelCase : int = size
_lowerCamelCase : List[str] = crop_pct
_lowerCamelCase : Tuple = crop_size
_lowerCamelCase : List[str] = do_normalize
_lowerCamelCase : str = image_mean
_lowerCamelCase : Optional[Any] = image_std
def _lowercase ( self: List[str] ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = PoolFormerImageProcessor if is_vision_available() else None
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : str = PoolFormerImageProcessingTester(self )
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_resize_and_center_crop" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"size" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"crop_pct" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_normalize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_mean" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_std" ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size ,{"height": 30, "width": 30} )
_lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,Image.Image )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : List[Any] = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,np.ndarray )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : Tuple = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowerCAmelCase ,torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,torch.Tensor )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
_lowerCamelCase : Optional[Any] = image_processing(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
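# Usage sketch mirroring the tests above (sizes match the tester defaults):
#
# processor = PoolFormerImageProcessor(
#     size={"shortest_edge": 30}, crop_size={"height": 30, "width": 30}
# )
# pixel_values = processor(images, return_tensors="pt").pixel_values
# pixel_values.shape  # (batch_size, num_channels, 30, 30)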
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class A_ ( _a , _a ):
lowerCAmelCase__ = 'bit'
lowerCAmelCase__ = ['preactivation', 'bottleneck']
lowerCAmelCase__ = ['SAME', 'VALID']
def __init__( self: Tuple ,__lowerCAmelCase: List[Any]=3 ,__lowerCAmelCase: List[str]=64 ,__lowerCAmelCase: Union[str, Any]=[256, 512, 1_024, 2_048] ,__lowerCAmelCase: Optional[int]=[3, 4, 6, 3] ,__lowerCAmelCase: str="preactivation" ,__lowerCAmelCase: Tuple="relu" ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[str]=0.0 ,__lowerCAmelCase: Optional[Any]=False ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Dict=1 ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: str=None ,**__lowerCAmelCase: Any ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowerCamelCase : List[Any] = global_padding.upper()
else:
raise ValueError(F"""Padding strategy {global_padding} not supported""" )
_lowerCamelCase : str = num_channels
_lowerCamelCase : str = embedding_size
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : Any = layer_type
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = global_padding
_lowerCamelCase : Tuple = num_groups
_lowerCamelCase : Optional[int] = drop_path_rate
_lowerCamelCase : List[Any] = embedding_dynamic_padding
_lowerCamelCase : Any = output_stride
_lowerCamelCase : List[str] = width_factor
_lowerCamelCase : List[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(__lowerCAmelCase ) + 1 )]
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,stage_names=self.stage_names )
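# Usage sketch: out_features/out_indices are validated and aligned against the
# stage names built above ("stem", "stage1", ...).
#
# config = BitConfig(out_features=["stage1", "stage4"])
# config.out_indices  # [1, 4]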
| 46 | 1 |
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowerCAmelCase : Optional[Any] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
_lowerCAmelCase : Any = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
_lowerCAmelCase : str = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase : List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase : List[str] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
_lowerCAmelCase : Dict = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase : Any = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
_lowerCAmelCase : Any = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase : int = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
_lowerCAmelCase : List[str] = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase : Optional[int] = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
_lowerCAmelCase : Dict = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase : Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
_lowerCAmelCase : List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
_lowerCAmelCase : str = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    # `example_yaml_structure` is assumed to be the expected-structure mapping defined at the
    # top of this test module (not shown in this excerpt).
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 46 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
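# Illustration only (not in the original file): a hedged usage sketch of VivitConfig.
# With the defaults, a 32-frame 224x224 clip is split into (32 // 2) * (224 // 16) ** 2 = 3136
# tubelets of size 2 x 16 x 16.
if __name__ == "__main__":
    config = VivitConfig()
    t, h, w = config.tubelet_size
    num_tubelets = (config.num_frames // t) * (config.image_size // h) * (config.image_size // w)
    print(num_tubelets)  # 3136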
| 46 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 46 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
def _lowercase ( self: Tuple ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
def _lowercase ( self: Tuple ):
'''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self: str ):
'''simple docstring'''
pass
| 46 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : Dict = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[Any] = ['''YolosFeatureExtractor''']
_lowerCAmelCase : Optional[Any] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
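# Illustration only (not part of the original module): a minimal sketch of the lazy-import
# pattern used above. `TinyLazyModule` is hypothetical and assumes nothing about the real
# _LazyModule internals; attribute access triggers the submodule import on first use.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")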
| 46 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : str = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Return (height, width) rounded up to ceil(x / scale_factor**2) * scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1] normalized CHW torch tensor."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1  # map [0, 255] -> [-1, 1]
    arr = np.transpose(arr, [2, 0, 1])  # HWC -> CHW
    image = torch.from_numpy(arr).unsqueeze(0)  # add batch dimension
    return image
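# Illustration only (not part of the pipeline): quick checks of the helpers above.
if __name__ == "__main__":
    assert downscale_height_and_width(512, 512) == (64, 64)
    assert downscale_height_and_width(300, 512) == (40, 64)  # non-multiples round up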
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
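# Illustration only (not part of the pipeline): the classifier-free guidance step inside
# __call__ above reduces to this combination of the two stacked noise predictions.
def _cfg_demo(guidance_scale: float = 4.0) -> torch.Tensor:
    noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, cond] stacked along the batch dim
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)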
| 46 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vqvae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 46 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
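# Illustration only (not part of this script): a tiny, insecure round trip showing how the
# generated (n, e) / (n, d) pairs are used. Numbers are toy-sized on purpose.
def _rsa_demo() -> bool:
    p, q, e = 61, 53, 17                # toy primes; never use sizes like this in practice
    n = p * q                           # 3233
    d = pow(e, -1, (p - 1) * (q - 1))   # modular inverse of e, Python 3.8+
    message = 42
    ciphertext = pow(message, e, n)
    return pow(ciphertext, d, n) == message  # True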
| 46 | 1 |
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count occurrences of `term` in `document` (case-insensitive, punctuation stripped)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Compute idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    return round(tf * idf, 3)
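# Illustration only (not part of the original module): a worked example of the functions above.
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran\na cat ran\nbirds fly"
    tf = term_frequency("cat", "the cat sat with another cat")  # 2
    df, n = document_frequency("cat", corpus)                   # (2, 4)
    idf = inverse_document_frequency(df, n)                     # round(log10(4 / 2), 3) = 0.301
    print(tf_idf(tf, idf))                                      # 0.602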
| 46 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
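        # Worked example with the defaults above: image_size=30 and patch_size=2 give
        # num_patches = (30 // 2) ** 2 = 225, and mask_ratio=0.6 keeps
        # seq_length = ceil(0.4 * 226) = 91 tokens (the +1 accounts for [CLS]).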
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Any = outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
_lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
| 46 | 1 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_lowerCAmelCase : Optional[int] = '''<<<<<<< This should probably be modified because it mentions: '''
_lowerCAmelCase : str = '''=======
>>>>>>>
'''
_lowerCAmelCase : Dict = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
_lowerCAmelCase : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Create a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
def _lowercase ( __lowerCAmelCase: ArgumentParser ):
'''simple docstring'''
_lowerCamelCase : List[str] = parser.add_parser(
"convert" ,help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." ,)
train_parser.add_argument(
"--tfds_path" ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." ,)
train_parser.add_argument(
"--datasets_directory" ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=__lowerCAmelCase )
def __init__( self: str ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = get_logger("datasets-cli/converting" )
_lowerCamelCase : Any = tfds_path
_lowerCamelCase : Dict = datasets_directory
def _lowercase ( self: Tuple ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
_lowerCamelCase : Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_lowerCamelCase : Tuple = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
_lowerCamelCase : int = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[Any] = []
_lowerCamelCase : str = {}
if os.path.isdir(self._tfds_path ):
_lowerCamelCase : Tuple = os.listdir(__lowerCAmelCase )
else:
_lowerCamelCase : str = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
_lowerCamelCase : Tuple = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(__lowerCAmelCase ,encoding="utf-8" ) as f:
_lowerCamelCase : Optional[Any] = f.readlines()
_lowerCamelCase : int = []
_lowerCamelCase : List[Any] = False
_lowerCamelCase : List[str] = False
_lowerCamelCase : List[str] = []
for line in lines:
_lowerCamelCase : Union[str, Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_lowerCamelCase : Optional[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
_lowerCamelCase : Optional[int] = ""
continue
elif "from absl import logging" in out_line:
_lowerCamelCase : Dict = "from datasets import logging\n"
elif "getLogger" in out_line:
_lowerCamelCase : Union[str, Any] = out_line.replace("getLogger" ,"get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Any = list(filter(lambda __lowerCAmelCase : e in out_line ,__lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + "\n" )
out_lines.append(__lowerCAmelCase )
out_lines.append(__lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
_lowerCamelCase : Dict = re.sub(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_lowerCamelCase : Any = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" ,__lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
_lowerCamelCase : str = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_lowerCamelCase : Optional[int] = True
out_lines.append(__lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_lowerCamelCase : Union[str, Any] = f_name.replace(".py" ,"" )
_lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(__lowerCAmelCase )
with open(__lowerCAmelCase ,"w" ,encoding="utf-8" ) as f:
f.writelines(__lowerCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
_lowerCamelCase : Dict = os.path.basename(__lowerCAmelCase )
_lowerCamelCase : Any = imports_to_builder_map[f_name.replace(".py" ,"" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(__lowerCAmelCase ,__lowerCAmelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 46 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Sequentially search array[left:right] for target; return its index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted array; returns an index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
_lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowerCAmelCase : Any = int(input('''Enter the number to be found in the list:\n''').strip())
_lowerCAmelCase : Union[str, Any] = ite_ternary_search(collection, target)
_lowerCAmelCase : str = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
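# Minimal usage sketch for the two searches above; the sample list is an
# illustrative assumption, and any sorted list of integers behaves the same.
sample = list(range(0, 100, 3))  # 0, 3, 6, ..., 99 (sorted, 34 elements)
assert ite_ternary_search(sample, 42) == 14
assert rec_ternary_search(0, len(sample) - 1, sample, 42) == 14
assert ite_ternary_search(sample, 43) == -1  # absent values yield -1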
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self: List[str] ,initial_learning_rate: float ,decay_schedule_fn: Callable ,warmup_steps: int ,power: float = 1.0 ,name: str = None ,):
        '''simple docstring'''
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self: List[str] ,step: int ):
        '''simple docstring'''
        with tf.name_scope(self.name or "WarmUp" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step ,tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps ,tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done ,self.power )
            return tf.cond(
                global_step_float < warmup_steps_float ,lambda: warmup_learning_rate ,lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,name=name ,)
def _lowercase ( self: Any ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer( init_lr ,num_train_steps ,num_warmup_steps ,min_lr_ratio = 0.0 ,adam_beta1 = 0.9 ,adam_beta2 = 0.999 ,adam_epsilon = 1e-8 ,adam_clipnorm = None ,adam_global_clipnorm = None ,weight_decay_rate = 0.0 ,power = 1.0 ,include_in_weight_decay = None ,) -> Any:
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
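# A minimal sketch of the schedule create_optimizer composes: linear warmup
# into a polynomial decay. The step counts and learning rate are illustrative
# assumptions, and power=1.0 (linear decay to zero) mirrors the defaults above.
def _sketch_lr(step, init_lr=5e-5, warmup_steps=100, total_steps=1_000):
    if step < warmup_steps:
        return init_lr * step / warmup_steps  # warmup branch of WarmUp
    remaining = total_steps - warmup_steps  # decay branch: linear to 0
    return init_lr * max(0.0, 1 - (step - warmup_steps) / remaining)


assert _sketch_lr(50) == 2.5e-5   # halfway through warmup -> half of init_lr
assert _sketch_lr(550) == 2.5e-5  # halfway down the decay -> half of init_lr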
class A_ ( _a ):
    def __init__( self: Optional[int] ,learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 ,beta_1: float = 0.9 ,beta_2: float = 0.999 ,epsilon: float = 1e-7 ,amsgrad: bool = False ,weight_decay_rate: float = 0.0 ,include_in_weight_decay: Optional[List[str]] = None ,exclude_from_weight_decay: Optional[List[str]] = None ,name: str = "AdamWeightDecay" ,**kwargs ,):
        '''simple docstring'''
        super().__init__(learning_rate ,beta_1 ,beta_2 ,epsilon ,amsgrad ,name ,**kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def _lowercase ( cls: Union[str, Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = {"WarmUp": WarmUp}
return super(__lowerCAmelCase ,cls ).from_config(__lowerCAmelCase ,custom_objects=__lowerCAmelCase )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
super(__lowerCAmelCase ,self )._prepare_local(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tf.constant(
self.weight_decay_rate ,name="adam_weight_decay_rate" )
def _lowercase ( self: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] ,use_locking=self._use_locking ,)
return tf.no_op()
def _lowercase ( self: List[str] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Union[str, Any]=None ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : int = list(zip(*__lowerCAmelCase ) )
return super(__lowerCAmelCase ,self ).apply_gradients(zip(__lowerCAmelCase ,__lowerCAmelCase ) ,name=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_lowerCamelCase : int = apply_state or {}
_lowerCamelCase : List[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_lowerCamelCase : Optional[Any] = self._fallback_apply_state(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[int] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Union[str, Any]=None ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self._get_lr(var.device ,var.dtype.base_dtype ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self._decay_weights_op(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(__lowerCAmelCase ,self )._resource_apply_dense(__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Tuple=None ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Tuple = self._get_lr(var.device ,var.dtype.base_dtype ,__lowerCAmelCase )
_lowerCamelCase : Tuple = self._decay_weights_op(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(__lowerCAmelCase ,self )._resource_apply_sparse(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
    def _do_use_weight_decay( self: Tuple ,param_name: Any ):
        '''simple docstring'''
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r ,param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r ,param_name ) is not None:
                    return False
        return True
class A_ ( _a ):
def __init__( self: Tuple ):
'''simple docstring'''
        self._gradients = []
        self._accum_steps = None
@property
    def step( self: Dict ):
'''simple docstring'''
if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 ,dtype=tf.int64 ) ,trainable=False ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
return self._accum_steps.value()
@property
    def gradients( self: Dict ):
'''simple docstring'''
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self: str ,gradients ):
        '''simple docstring'''
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) ,trainable=False ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(gradients )}""" )
        for accum_gradient, gradient in zip(self._gradients ,gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
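# Minimal sketch of the accumulate-then-apply pattern GradientAccumulator
# supports; `w` stands in for one model parameter and the per-micro-batch
# gradients are synthetic numbers, not real model output.
import tensorflow as tf

w = tf.Variable(1.0)
accum = tf.Variable(0.0, trainable=False)
for g in [0.2, 0.4, 0.1, 0.3]:  # pretend gradients from 4 micro-batches
    accum.assign_add(g)  # what __call__ does per micro-batch
mean_grad = accum / 4  # scale before the optimizer step
w.assign_sub(0.1 * mean_grad)  # one update with learning rate 0.1
accum.assign(0.0)  # what reset() does
assert abs(float(w) - 0.975) < 1e-6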
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase = 100 ) -> int:
'''simple docstring'''
_lowerCamelCase : List[str] = set()
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Optional[int] = n + 1 # maximum limit
for a in range(2 , _lowerCamelCase ):
for b in range(2 , _lowerCamelCase ):
_lowerCamelCase : List[str] = a**b # calculates the current power
collect_powers.add(_lowerCamelCase ) # adds the result to the set
return len(_lowerCamelCase )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
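# Worked example from the problem statement: for 2 <= a <= 5 and 2 <= b <= 5
# there are 15 distinct terms, because duplicates such as 2**4 == 4**2 == 16
# collapse in the set.
assert solution(5) == 15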
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class A_ ( _a ):
lowerCAmelCase__ = ['image_processor']
lowerCAmelCase__ = 'SamImageProcessor'
    def __init__( self: Tuple ,image_processor: List[Any] ):
        '''simple docstring'''
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__( self: Dict ,images=None ,input_points=None ,input_labels=None ,input_boxes=None ,return_tensors: Optional[Union[str, TensorType]] = None ,**kwargs ,):
        '''simple docstring'''
        encoding_image_processor = self.image_processor(
            images ,return_tensors=return_tensors ,**kwargs ,)
        # pop arguments that are not used in the forward pass but are used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes ,"numpy" ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points ,input_labels=input_labels ,input_boxes=input_boxes ,)
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor ,original_sizes ,input_points=input_points ,input_labels=input_labels ,input_boxes=input_boxes ,return_tensors=return_tensors ,)
        return encoding_image_processor
    def _normalize_and_convert( self: Optional[int] ,encoding_image_processor ,original_sizes ,input_points=None ,input_labels=None ,input_boxes=None ,return_tensors="pt" ,):
        '''simple docstring'''
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size ,point ,original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size ,point ,original_size )
                    for point, original_size in zip(input_points ,original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points ,input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size ,box ,original_sizes[0] ,is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size ,box ,original_size ,is_bounding_box=True )
                    for box, original_size in zip(input_boxes ,original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes ,1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points ,1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels ,1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels( self: Optional[Any] ,input_points: List[Any] ,input_labels: Union[str, Any] ):
        '''simple docstring'''
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] ,axis=0 )
                input_labels[i] = np.append(input_labels[i] ,[self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates( self: Any ,target_size: int ,coords: np.ndarray ,original_size: List[str] ,is_bounding_box: List[str]=False ):
        '''simple docstring'''
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size ,longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 ,2 ,2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 ,4 )
        return coords
    def _check_and_preprocess_points( self: int ,input_points=None ,input_labels=None ,input_boxes=None ,):
        '''simple docstring'''
        if input_points is not None:
            if hasattr(input_points ,"numpy" ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points ,list ) or not isinstance(input_points[0] ,list ):
                raise ValueError("Input points must be a list of lists of floating point numbers." )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels ,"numpy" ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels ,list ) or not isinstance(input_labels[0] ,list ):
                raise ValueError("Input labels must be a list of lists of integers." )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes ,"numpy" ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes ,list )
                or not isinstance(input_boxes[0] ,list )
                or not isinstance(input_boxes[0][0] ,list )
            ):
                raise ValueError("Input boxes must be a list of lists of lists of floating point numbers." )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
    def model_input_names( self: int ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks( self: List[str] ,*args ,**kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_masks(*args ,**kwargs )
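# Minimal standalone sketch of the rescaling _normalize_coordinates performs:
# points in the original image frame are mapped into the resized frame whose
# longest side is `longest_edge`. The resize rule below (scale by
# longest_edge / max(h, w), round half up) is an assumption mirroring the
# image processor's _get_preprocess_shape; treat it as illustrative.
import numpy as np


def _sketch_preprocess_shape(old_h, old_w, longest_edge=1_024):
    scale = longest_edge / max(old_h, old_w)
    return int(old_h * scale + 0.5), int(old_w * scale + 0.5)


def _sketch_normalize(coords, original_size, longest_edge=1_024):
    old_h, old_w = original_size
    new_h, new_w = _sketch_preprocess_shape(old_h, old_w, longest_edge)
    coords = coords.astype(float).copy()
    coords[..., 0] *= new_w / old_w  # x coordinates
    coords[..., 1] *= new_h / old_h  # y coordinates
    return coords


# A point at (x=600, y=400) in a 900x1200 image lands at x=512.0, y~341.3.
print(_sketch_normalize(np.array([[600.0, 400.0]]), (900, 1200)))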
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowerCAmelCase : Optional[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ ( _a ):
lowerCAmelCase__ = 'esm'
    def __init__( self: str ,vocab_size=None ,mask_token_id=None ,pad_token_id=None ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_026 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,position_embedding_type="absolute" ,use_cache=True ,emb_layer_norm_before=None ,token_dropout=False ,is_folding_model=False ,esmfold_config=None ,vocab_list=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,mask_token_id=mask_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config ,dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict( self: Any ):
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config ,EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
    def __post_init__( self: Dict ):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk ,dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self: Optional[Any] ):
        '''simple docstring'''
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class A_ :
lowerCAmelCase__ = 4_8
lowerCAmelCase__ = 1_0_2_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 3_2
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
lowerCAmelCase__ = 4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = None
    def __post_init__( self: Any ):
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module ,dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict( self: List[str] ):
        '''simple docstring'''
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class A_ :
lowerCAmelCase__ = 3_8_4
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 1_2_8
lowerCAmelCase__ = 1_2
lowerCAmelCase__ = 4
lowerCAmelCase__ = 8
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 1E-8
lowerCAmelCase__ = 1E5
    def to_dict( self: Any ):
        '''simple docstring'''
        return asdict(self )
def get_default_vocab_list() -> tuple:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
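# Minimal sketch of the dimension arithmetic the TrunkConfig checks enforce,
# using the defaults above (1024/32 for the sequence track, 128/32 pairwise):
# each state dim must factor exactly into heads of the configured width.
sequence_state_dim, sequence_head_width = 1_024, 32
pairwise_state_dim, pairwise_head_width = 128, 32
sequence_num_heads = sequence_state_dim // sequence_head_width  # 32 heads
pairwise_num_heads = pairwise_state_dim // pairwise_head_width  # 4 heads
assert sequence_state_dim == sequence_num_heads * sequence_head_width
assert pairwise_state_dim == pairwise_num_heads * pairwise_head_width
assert pairwise_state_dim % 2 == 0  # also required to be even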
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
    def test_save_load( self: Dict ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff ,1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
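# Minimal sketch of the sequence-length arithmetic the tester above relies on:
# with mask_ratio=0.6 only ~40% of the patch tokens (plus [CLS]) stay visible.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2  # 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # 91
assert (num_patches, seq_length) == (225, 91)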
"""simple docstring"""
import re
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if len(re.findall("[ATCG]" , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(_lowerCamelCase , x % y )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase = 20 ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = 1
for i in range(1 , n + 1 ):
_lowerCamelCase : Optional[Any] = lcm(_lowerCamelCase , _lowerCamelCase )
return g
if __name__ == "__main__":
print(f'''{solution() = }''')
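# Worked example from the problem statement: the smallest number evenly
# divisible by every integer from 1 to 10 is 2520.
assert solution(10) == 2520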
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ) -> List[Any]:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ) -> Optional[int]:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ) -> int:
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key(dct , old , new ) -> Any:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img() -> Optional[int]:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name , pytorch_dump_folder_path , base_model=True ) -> str:
'''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
# load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main" ,model_name )
original_model.eval()
# load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config ,base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,base_model )
# load HuggingFace model
    if base_model:
        model = ViTModel(config ,add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
# Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_lowerCAmelCase : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
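# Minimal numpy sketch of what read_in_q_k_v does: timm stores attention as a
# single fused qkv projection of shape (3 * hidden, hidden), which is sliced
# row-wise into query, key and value. hidden_size=4 is an illustrative choice.
import numpy as np

hidden_size = 4
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)
in_proj_bias = np.arange(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]  # first third of the rows
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]  # middle third
value_w = in_proj_weight[-hidden_size:, :]  # last third
query_b, key_b, value_b = (
    in_proj_bias[:hidden_size],
    in_proj_bias[hidden_size : hidden_size * 2],
    in_proj_bias[-hidden_size:],
)
assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)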
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
while second != 0:
_lowerCamelCase : int = first & second
first ^= second
_lowerCamelCase : str = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = int(input('''Enter the first number: ''').strip())
_lowerCAmelCase : Union[str, Any] = int(input('''Enter the second number: ''').strip())
print(f'''{add(first, second) = }''')
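# Worked trace of add(3, 5) == 8, one loop iteration per line (4-bit binary):
#   carry = 0011 & 0101 = 0001   first = 0011 ^ 0101 = 0110   second = 0010
#   carry = 0110 & 0010 = 0010   first = 0110 ^ 0010 = 0100   second = 0100
#   carry = 0100 & 0100 = 0100   first = 0100 ^ 0100 = 0000   second = 1000
#   carry = 0000 & 1000 = 0000   first = 0000 ^ 1000 = 1000   second = 0000
assert add(3, 5) == 8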
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs ):
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class A_ ( _a ):
lowerCAmelCase__ = 'sigmoid'
lowerCAmelCase__ = 'softmax'
lowerCAmelCase__ = 'none'
@add_end_docstrings(
_a , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class A_ ( _a ):
lowerCAmelCase__ = False
lowerCAmelCase__ = ClassificationFunction.NONE
    def __init__( self: str ,**kwargs: str ):
        '''simple docstring'''
        super().__init__(**kwargs )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self: Dict ,return_all_scores=None ,function_to_apply=None ,top_k="" ,**tokenizer_kwargs ):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k ,int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,UserWarning ,)
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply ,str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self: int ,*args ,**kwargs ):
        '''simple docstring'''
        result = super().__call__(*args ,**kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0] ,str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self: int ,inputs: List[str] ,**tokenizer_kwargs ):
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs ,dict ):
            return self.tokenizer(**inputs ,return_tensors=return_tensors ,**tokenizer_kwargs )
        elif isinstance(inputs ,list ) and len(inputs ) == 1 and isinstance(inputs[0] ,list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=return_tensors ,**tokenizer_kwargs )
        elif isinstance(inputs ,list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs ,return_tensors=return_tensors ,**tokenizer_kwargs )
    def _forward( self: int ,model_inputs: Optional[Any] ):
        '''simple docstring'''
        return self.model(**model_inputs )
    def postprocess( self: Optional[Any] ,model_outputs: List[Any] ,function_to_apply=None ,top_k=1 ,_legacy=True ):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config ,"function_to_apply" ):
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"] ,reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
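# Minimal standalone sketch of the postprocessing above: softmax the logits,
# attach labels, sort by score, truncate to top_k. The logits and label map
# are hypothetical stand-ins, not taken from any real model config.
import numpy as np

logits = np.array([1.0, 3.0, 0.5])
id2label = {0: "NEGATIVE", 1: "NEUTRAL", 2: "POSITIVE"}

exp = np.exp(logits - logits.max())  # stabilized softmax
scores = exp / exp.sum()
dict_scores = [{"label": id2label[i], "score": float(s)} for i, s in enumerate(scores)]
dict_scores.sort(key=lambda d: d["score"], reverse=True)
print(dict_scores[:2])  # top_k=2: NEUTRAL first, then NEGATIVE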
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class A_ ( _a , _a ):
lowerCAmelCase__ = 1
@register_to_config
    def __init__( self: List[Any] ,num_train_timesteps: int = 2_000 ,snr: float = 0.15 ,sigma_min: float = 0.01 ,sigma_max: float = 1348.0 ,sampling_eps: float = 1e-5 ,correct_steps: int = 1 ,):
        '''simple docstring'''
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps ,sigma_min ,sigma_max ,sampling_eps )
    def scale_model_input( self: Union[str, Any] ,sample: torch.FloatTensor ,timestep: Optional[int] = None ):
        '''simple docstring'''
        return sample
    def set_timesteps( self: Tuple ,num_inference_steps: int ,sampling_eps: float = None ,device: Union[str, torch.device] = None ):
        '''simple docstring'''
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 ,sampling_eps ,num_inference_steps ,device=device )
    def set_sigmas( self: int ,num_inference_steps: int ,sigma_min: float = None ,sigma_max: float = None ,sampling_eps: float = None ):
        '''simple docstring'''
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps ,sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) ,math.log(sigma_max ) ,num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self: Optional[Any] ,timesteps: Union[str, Any] ,t: Optional[Any] ):
'''simple docstring'''
return torch.where(
timesteps == 0 ,torch.zeros_like(t.to(timesteps.device ) ) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,)
def _lowercase ( self: Tuple ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[torch.Generator] = None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
_lowerCamelCase : Optional[int] = timestep * torch.ones(
sample.shape[0] ,device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCamelCase : Any = timesteps.to(self.discrete_sigmas.device )
_lowerCamelCase : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device )
_lowerCamelCase : Optional[Any] = self.get_adjacent_sigma(__lowerCAmelCase ,__lowerCAmelCase ).to(sample.device )
_lowerCamelCase : Optional[Any] = torch.zeros_like(__lowerCAmelCase )
_lowerCamelCase : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCamelCase : Optional[int] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_lowerCamelCase : Tuple = diffusion.unsqueeze(-1 )
_lowerCamelCase : Any = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCamelCase : str = randn_tensor(
sample.shape ,layout=sample.layout ,generator=__lowerCAmelCase ,device=sample.device ,dtype=sample.dtype )
_lowerCamelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCamelCase : Optional[int] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__lowerCAmelCase ,prev_sample_mean=__lowerCAmelCase )
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Correct the predicted sample based on the model output (Langevin-style correction)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
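The class above is meant to be driven as a predictor-corrector sampler. What follows is a minimal usage sketch, not part of the original file: `score_model` is a hypothetical stand-in for a trained score network (anything estimating grad_x log p_t(x)), and the shape and step counts are illustrative only.

import torch

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=1000)
scheduler.set_sigmas(num_inference_steps=1000)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma  # start from pure noise
for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
    # corrector: `correct_steps` Langevin MCMC steps at the current noise level
    for _ in range(scheduler.config.correct_steps):
        score = score_model(sample, sigma_t)  # hypothetical score network
        sample = scheduler.step_correct(score, sample).prev_sample
    # predictor: one discretized reverse-time SDE step toward lower noise
    score = score_model(sample, sigma_t)  # hypothetical score network
    sample = scheduler.step_pred(score, t, sample).prev_sample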
| 46 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    # Write FILE_CONTENT into a zstd-compressed file once per test session.
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    # Create a plain-text file on the mock "tmp://" filesystem.
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Tuple = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_lowerCamelCase : Tuple = input_paths[compression_format]
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Any = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
_lowerCamelCase : Optional[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
_lowerCamelCase : List[Any] = f.read()
with open(_lowerCamelCase ) as f:
_lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "custom_cache"
_lowerCamelCase : List[str] = "custom_extracted_dir"
_lowerCamelCase : str = tmp_path / "custom_extracted_path"
if default_extracted:
_lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
_lowerCamelCase : int = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCamelCase : int = xz_file
_lowerCamelCase : List[Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
_lowerCamelCase : Dict = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( ) -> int:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
| 46 | 1 |