'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
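# NOTE: identifiers in this sample (`_A`, `__snake_case`, `__magic_name__`, `lowercase__`)
# appear to be machine-mangled placeholders from the dataset pipeline, so several names
# below (e.g. `config_and_inputs`, `UperNetModelTester`) no longer resolve as written.
# Structurally this is the transformers test suite for UperNetForSemanticSegmentation
# with a ConvNext backbone.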
class _A :
def __init__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any]=13 , __magic_name__ : Optional[Any]=32 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Dict=[10, 20, 30, 40] , __magic_name__ : str=[2, 2, 3, 2] , __magic_name__ : Tuple=True , __magic_name__ : Any=True , __magic_name__ : Tuple=37 , __magic_name__ : int="gelu" , __magic_name__ : Union[str, Any]=10 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : int=["stage2", "stage3", "stage4"] , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=None , ) -> Tuple:
"""simple docstring"""
__snake_case : str = parent
__snake_case : List[Any] = batch_size
__snake_case : Any = image_size
__snake_case : int = num_channels
__snake_case : List[Any] = num_stages
__snake_case : Union[str, Any] = hidden_sizes
__snake_case : Dict = depths
__snake_case : Optional[int] = is_training
__snake_case : Any = use_labels
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = type_sequence_label_size
__snake_case : Any = initializer_range
__snake_case : str = out_features
__snake_case : str = num_labels
__snake_case : Dict = scope
__snake_case : Optional[int] = num_stages
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = None
if self.use_labels:
__snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__magic_name__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__magic_name__ , loss_ignore_index=2_55 , num_labels=self.num_labels , )
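# The `__magic_name__` placeholders passed above stand in for literal arguments lost to
# the mangling; in the upstream transformers test they are booleans
# (use_auxiliary_head=True, auxiliary_concat_input=False). Treat the exact values as an
# assumption.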
def lowercase__ ( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = UperNetForSemanticSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : int = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__snake_case : Tuple = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case)) : List[str] = config_and_inputs
__snake_case : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__: Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__: int = False
lowercase__: str = False
lowercase__: Optional[Any] = False
lowercase__: List[str] = False
lowercase__: str = False
lowercase__: str = False
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = UperNetModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(__magic_name__ )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Any ):
__snake_case : Dict = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Any = _config_zero_init(__magic_name__ )
__snake_case : Union[str, Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = UperNetForSemanticSegmentation.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ) -> Any:
"""simple docstring"""
__snake_case : Dict = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
__snake_case : List[str] = Image.open(_lowerCamelCase ).convert("""RGB""" )
return image
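# Slow integration tests: run the openmmlab Swin-tiny and ConvNext-tiny UperNet
# checkpoints on the ADE20k fixture image above and compare a 3x3 patch of the
# (1, num_labels, 512, 512) logits against hard-coded reference values.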
@require_torch
@require_vision
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
__snake_case : int = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(__magic_name__ )
__snake_case : Tuple = prepare_img()
__snake_case : int = processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
with torch.no_grad():
__snake_case : Union[str, Any] = model(**__magic_name__ )
__snake_case : str = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : List[str] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __magic_name__ , atol=1E-4 ) )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
__snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(__magic_name__ )
__snake_case : Union[str, Any] = prepare_img()
__snake_case : Optional[int] = processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
__snake_case : Optional[Any] = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : str = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __magic_name__ , atol=1E-4 ) )
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
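# The analyzer below lists files in a directory, keeps or drops them by `identifier` /
# `n_identifier`, then either imports each file and runs doctest.DocTestSuite on the
# resulting module (`only_modules=True`) or runs doctest.testfile on the raw file.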
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
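# TPU analogue of torch.distributed.launch: parse the target training script plus its
# arguments, import the script as a module, rewrite sys.argv, and spawn `num_cores`
# processes via xmp.spawn, each of which enters the script's `_mp_fn`.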
def _a ( ) -> Any:
"""simple docstring"""
__snake_case : Tuple = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=_lowerCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=_lowerCamelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=_lowerCamelCase )
return parser.parse_args()
def _a ( ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = parse_args()
# Import training_script as a module.
__snake_case : Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__snake_case : Any = script_fpath.stem
__snake_case : List[str] = importlib.import_module(_lowerCamelCase )
# Patch sys.argv
__snake_case : int = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
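# Speech-to-image pipeline: Whisper transcribes the input audio into a prompt, which then
# drives a standard Stable Diffusion loop (CLIP text encoding -> UNet denoising with
# optional classifier-free guidance -> VAE decode).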
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
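# Concretely, the guided prediction computed later in the denoising loop is
#   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# so guidance_scale == 1.0 reduces algebraically to the plain text-conditioned prediction
# (and the branch is skipped entirely, since do_classifier_free_guidance requires
# guidance_scale > 1.0).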
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
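# After denoising, 1 / 0.18215 undoes Stable Diffusion's standard latent scaling factor
# so the latents land back in the VAE decoder's expected input range.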
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
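# Conditional DETR configuration: wires a backbone (timm, or an HF ResNet config) into the
# DETR-style encoder/decoder and stores Hungarian-matcher costs and loss coefficients; the
# companion OnnxConfig class below declares the pixel_values / pixel_mask ONNX inputs.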
class _A ( __lowercase ):
lowercase__: List[str] = '''conditional_detr'''
lowercase__: Optional[int] = ['''past_key_values''']
lowercase__: Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Tuple , __magic_name__ : Optional[Any]=True , __magic_name__ : int=None , __magic_name__ : str=3 , __magic_name__ : Tuple=3_00 , __magic_name__ : str=6 , __magic_name__ : Any=20_48 , __magic_name__ : Optional[int]=8 , __magic_name__ : Dict=6 , __magic_name__ : Optional[int]=20_48 , __magic_name__ : List[str]=8 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : Any=0.0 , __magic_name__ : int=True , __magic_name__ : List[str]="relu" , __magic_name__ : Tuple=2_56 , __magic_name__ : Any=0.1 , __magic_name__ : Union[str, Any]=0.0 , __magic_name__ : int=0.0 , __magic_name__ : str=0.02 , __magic_name__ : List[str]=1.0 , __magic_name__ : Union[str, Any]=False , __magic_name__ : Dict="sine" , __magic_name__ : Union[str, Any]="resnet50" , __magic_name__ : Optional[int]=True , __magic_name__ : Any=False , __magic_name__ : Dict=2 , __magic_name__ : Any=5 , __magic_name__ : Dict=2 , __magic_name__ : int=1 , __magic_name__ : Optional[Any]=1 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[int]=5 , __magic_name__ : Dict=2 , __magic_name__ : Any=0.25 , **__magic_name__ : Dict , ) -> int:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__snake_case : Any = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = backbone_config.get("""model_type""" )
__snake_case : Dict = CONFIG_MAPPING[backbone_model_type]
__snake_case : List[Any] = config_class.from_dict(__magic_name__ )
__snake_case : Any = use_timm_backbone
__snake_case : Tuple = backbone_config
__snake_case : Optional[Any] = num_channels
__snake_case : int = num_queries
__snake_case : Dict = d_model
__snake_case : Optional[int] = encoder_ffn_dim
__snake_case : int = encoder_layers
__snake_case : List[Any] = encoder_attention_heads
__snake_case : List[Any] = decoder_ffn_dim
__snake_case : int = decoder_layers
__snake_case : Optional[Any] = decoder_attention_heads
__snake_case : Any = dropout
__snake_case : int = attention_dropout
__snake_case : Optional[Any] = activation_dropout
__snake_case : Tuple = activation_function
__snake_case : Union[str, Any] = init_std
__snake_case : int = init_xavier_std
__snake_case : Any = encoder_layerdrop
__snake_case : Union[str, Any] = decoder_layerdrop
__snake_case : List[str] = encoder_layers
__snake_case : Tuple = auxiliary_loss
__snake_case : str = position_embedding_type
__snake_case : Dict = backbone
__snake_case : str = use_pretrained_backbone
__snake_case : str = dilation
# Hungarian matcher
__snake_case : int = class_cost
__snake_case : Tuple = bbox_cost
__snake_case : str = giou_cost
# Loss coefficients
__snake_case : Any = mask_loss_coefficient
__snake_case : Any = dice_loss_coefficient
__snake_case : List[str] = cls_loss_coefficient
__snake_case : int = bbox_loss_coefficient
__snake_case : Optional[int] = giou_loss_coefficient
__snake_case : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
return self.d_model
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__snake_case : Union[str, Any] = self.backbone_config.to_dict()
__snake_case : Dict = self.__class__.model_type
return output
class _A ( __lowercase ):
lowercase__: Dict = version.parse('''1.11''' )
@property
def lowercase__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def lowercase__ ( self : List[Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
return 12
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
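# Central diffusers constants: cache locations plus canonical weight filenames for the
# PyTorch (.bin), Flax (.msgpack), ONNX, and safetensors serialization formats.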
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
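# XLM test suite: the tester below fabricates input ids, lengths, langs and labels, then
# exercises every head variant (LM, simple QA, beam-search QA with cls_index/is_impossible,
# sequence/token classification, multiple choice) plus generation shape checks.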
class _A :
def __init__( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int]=13 , __magic_name__ : Any=7 , __magic_name__ : Any=True , __magic_name__ : Optional[int]=True , __magic_name__ : Any=True , __magic_name__ : str=True , __magic_name__ : Tuple=True , __magic_name__ : str=False , __magic_name__ : List[Any]=False , __magic_name__ : Optional[Any]=False , __magic_name__ : int=2 , __magic_name__ : Any=99 , __magic_name__ : Union[str, Any]=0 , __magic_name__ : Tuple=32 , __magic_name__ : Optional[int]=5 , __magic_name__ : Tuple=4 , __magic_name__ : int=0.1 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : str=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[str]=2 , __magic_name__ : List[Any]=4 , __magic_name__ : str="last" , __magic_name__ : Dict=True , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 , ) -> str:
"""simple docstring"""
__snake_case : List[str] = parent
__snake_case : List[str] = batch_size
__snake_case : List[str] = seq_length
__snake_case : Dict = is_training
__snake_case : Optional[Any] = use_input_lengths
__snake_case : Optional[int] = use_token_type_ids
__snake_case : int = use_labels
__snake_case : Any = gelu_activation
__snake_case : List[str] = sinusoidal_embeddings
__snake_case : int = causal
__snake_case : int = asm
__snake_case : Any = n_langs
__snake_case : Any = vocab_size
__snake_case : str = n_special
__snake_case : Union[str, Any] = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : Any = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : str = type_sequence_label_size
__snake_case : Optional[int] = initializer_range
__snake_case : List[str] = num_labels
__snake_case : List[str] = num_choices
__snake_case : Tuple = summary_type
__snake_case : List[str] = use_proj
__snake_case : Dict = scope
__snake_case : Any = bos_token_id
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : List[Any] = None
if self.use_input_lengths:
__snake_case : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__snake_case : Optional[Any] = None
if self.use_token_type_ids:
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__snake_case : List[str] = None
__snake_case : List[Any] = None
__snake_case : List[Any] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : int = ids_tensor([self.batch_size] , 2 ).float()
__snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Any , ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = XLMModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Any = model(__magic_name__ , lengths=__magic_name__ , langs=__magic_name__ )
__snake_case : List[str] = model(__magic_name__ , langs=__magic_name__ )
__snake_case : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[int] , ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = XLMWithLMHeadModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Any = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
__snake_case : int = XLMForQuestionAnsweringSimple(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : int = model(__magic_name__ )
__snake_case : Any = model(__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ )
__snake_case : Tuple = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , ) -> int:
"""simple docstring"""
__snake_case : Dict = XLMForQuestionAnswering(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[Any] = model(__magic_name__ )
__snake_case : Optional[Any] = model(
__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , cls_index=__magic_name__ , is_impossible=__magic_name__ , p_mask=__magic_name__ , )
__snake_case : Dict = model(
__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , cls_index=__magic_name__ , is_impossible=__magic_name__ , )
((__snake_case) , ) : Dict = result_with_labels.to_tuple()
__snake_case : str = model(__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ )
((__snake_case) , ) : List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , ) -> int:
"""simple docstring"""
__snake_case : List[str] = XLMForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
__snake_case : Optional[int] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : List[str] , ) -> str:
"""simple docstring"""
__snake_case : Any = self.num_labels
__snake_case : Dict = XLMForTokenClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = self.num_choices
__snake_case : Optional[Any] = XLMForMultipleChoice(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[Any] = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Optional[Any] = config_and_inputs
__snake_case : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class _A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Optional[Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase__: Union[str, Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase__: Union[str, Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Union[str, Any]=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__snake_case : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
__snake_case : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : Any = XLMModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=__magic_name__ , emb_dim=37 )
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__magic_name__ )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__magic_name__ )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__magic_name__ )
def lowercase__ ( self : str , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : int=False , __magic_name__ : Any=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertListEqual(
[isinstance(__magic_name__ , __magic_name__ ) for iter_attentions in attentions] , [True] * len(__magic_name__ ) )
self.assertEqual(len(__magic_name__ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__magic_name__ ):
# adds PAD dummy token
__snake_case : List[str] = min_length + idx + 1
__snake_case : List[Any] = min_length + idx + 1
__snake_case : int = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__magic_name__ ) )
def lowercase__ ( self : Any , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Dict=False , __magic_name__ : Optional[int]=1 ) -> Optional[int]:
"""simple docstring"""
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertListEqual(
[isinstance(__magic_name__ , __magic_name__ ) for iter_hidden_states in hidden_states] , [True] * len(__magic_name__ ) , )
self.assertEqual(len(__magic_name__ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__magic_name__ ):
# adds PAD dummy token
__snake_case : str = min_length + idx + 1
__snake_case : Union[str, Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__magic_name__ ) , )
pass
@slow
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = XLMModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
class _A ( unittest.TestCase ):
@slow
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(__magic_name__ )
__snake_case : Any = torch.tensor([[14, 4_47]] , dtype=torch.long , device=__magic_name__ ) # the president
__snake_case : Union[str, Any] = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__snake_case : Tuple = model.generate(__magic_name__ , do_sample=__magic_name__ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __magic_name__ )
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
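# Checkpoint conversion script: build a MobileNetV1 config from the checkpoint name, load
# the TensorFlow weights, sanity-check logits on the COCO cats image, then save (and
# optionally push) the model and image processor. Note that identifier mangling in this
# dump breaks several cross-references, e.g. `get_mobilenet_va_config` and `prepare_img`
# versus the `_a` definitions.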
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
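# e.g. "mobilenet_v1_1.0_224" parses to 1.0 and 224; in the upstream script these feed
# config.depth_multiplier and config.image_size.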
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
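# Offline-mode tests: each OfflineSimulationMode patches connectivity differently
# (CONNECTION_TIMES_OUT makes requests hang/time out, CONNECTION_FAILS raises
# ConnectionError, HF_DATASETS_OFFLINE_SET_TO_1 short-circuits before any request),
# and the asserts check that the matching exception surfaces.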
@pytest.mark.integration
def _a ( ) -> Tuple:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(_lowerCamelCase ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def _a ( ) -> Tuple:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def _a ( ) -> Any:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
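# Note on the pattern above: the `offline` test helper monkeypatches the
# network entry points, so each simulation mode turns a would-be network call
# into a fast, deterministic failure:
#   CONNECTION_TIMES_OUT         -> requests hang / time out
#   CONNECTION_FAILS             -> requests raise ConnectionError
#   HF_DATASETS_OFFLINE_SET_TO_1 -> datasets' own helpers refuse to connect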
| 26 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute( self : Tuple , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ) -> Any:
        """simple docstring"""
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 26 | 1 |
'''simple docstring'''
class Graph :
    def __init__( self : str ) -> List[Any]:
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self : List[str] , vertex : List[Any] ) -> Dict:
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self : List[Any] , head : Optional[Any] , tail : Optional[Any] , weight : int ) -> int:
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        string = """"""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("""\n""" )
    def get_edges( self : int ) -> List[str]:
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self : Union[str, Any] ) -> str:
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ) -> Tuple:
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind :
    def __init__( self : int ) -> Tuple:
        """simple docstring"""
        self.parent = {}
        self.rank = {}
    def __len__( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        return len(self.parent )
    def make_set( self : str , item : int ) -> Optional[Any]:
        """simple docstring"""
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self : List[Any] , item : str ) -> List[Any]:
        """simple docstring"""
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union( self : List[str] , itema : Dict , itemb : Optional[int] ) -> List[str]:
        """simple docstring"""
        roota = self.find(itema )
        rootb = self.find(itemb )
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            self.parent[rootb] = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            self.parent[roota] = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            self.parent[rootb] = roota
            return roota
        return None
    @staticmethod
    def boruvka_mst( graph : int ) -> Union[str, Any]:
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
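# Worked example (added for illustration; not part of the original module):
if __name__ == "__main__":
    example_graph = Graph.build(edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)])
    # Boruvka keeps the three cheapest edges that connect all four vertices,
    # so the MST below contains the edges of weight 1, 2 and 3.
    print(UnionFind.boruvka_mst(example_graph ))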
| 26 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute( self : List[Any] , predictions : Tuple , references : List[Any] , sample_weight : Union[str, Any]=None ) -> Optional[int]:
        """simple docstring"""
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
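# Usage sketch (assumes `datasets` is installed):
# metric = datasets.load_metric("matthews_correlation")
# metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
# The result rounds to 0.54, matching Example 1 in the docstring above.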
| 26 | 1 |
'''simple docstring'''
def sum_digits( num ) -> int:
    """simple docstring"""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution( max_n = 100 ) -> int:
    """simple docstring"""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse :
    status_code = 200
    headers = {"""Content-Length""": """100"""}
    cookies = {}
    def iter_content( self : Any , **kwargs : List[Any] ) -> Dict:
        """simple docstring"""
        return [bytes(CONTENT , """utf-8""" )]
def mock_request( *args , **kwargs ) -> List[str]:
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl( path , file ) -> Optional[int]:
    """simple docstring"""
    assert path.endswith(""".jsonl""" )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode("""utf-8""" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def test_iter_archive_path( archive_jsonl , request ) -> Optional[int]:
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def test_iter_archive_file( archive_nested_jsonl , request ) -> List[str]:
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files( data_dir_with_hidden_files ) -> Any:
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 26 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def rename_keys( state_dict ) -> Tuple:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ) -> Any:
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
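# Illustration of the split above (hypothetical sizes): for hidden size H, the
# fused kv kernel has shape (2H, H); rows [0, H) become the key projection and
# rows [H, 2H) the value projection, with the bias cut the same way.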
def prepare_img() -> Union[str, Any]:
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ) -> List[Any]:
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 26 |
'''simple docstring'''
def solution( n = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch :
    def __init__( self : int , text : str , pattern : str ) -> Any:
        """simple docstring"""
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )
    def match_in_pattern( self : Union[str, Any] , char : str ) -> int:
        """simple docstring"""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self : List[Any] , current_pos : int ) -> int:
        """simple docstring"""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self : Union[str, Any] ) -> list[int]:
        """simple docstring"""
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
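    # For the demo inputs above ("AB" in "ABAABA"), the expected output is:
    #   Pattern found in following positions:
    #   [0, 3]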
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix :
    def __init__( self : str , row : int , column : int , default_value : float = 0 ) -> None:
        """simple docstring"""
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self : List[Any] ) -> str:
        """simple docstring"""
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'''%{max_element_length}s'''
        # Make string and return
        def single_line(row_vector : list[float] ) -> str:
            nonlocal string_format_identifier
            line = """["""
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
    def __repr__( self : Optional[int] ) -> str:
        """simple docstring"""
        return str(self )
    def validate_indicies( self : Dict , loc : tuple[int, int] ) -> bool:
        """simple docstring"""
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self : int , loc : tuple[int, int] ) -> Any:
        """simple docstring"""
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self : List[str] , loc : tuple[int, int] , value : float ) -> None:
        """simple docstring"""
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self : Any , another : Matrix ) -> Matrix:
        """simple docstring"""
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self : Tuple ) -> Matrix:
        """simple docstring"""
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self : Optional[int] , another : Matrix ) -> Matrix:
        """simple docstring"""
        return self + (-another)
    def __mul__( self : List[Any] , another : int | float | Matrix ) -> Matrix:
        """simple docstring"""
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )
    def transpose( self : str ) -> Matrix:
        """simple docstring"""
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self : Union[str, Any] , u : Matrix , v : Matrix ) -> Any:
        """simple docstring"""
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa() -> None:
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0] , u[1, 0] , u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0] , v[1, 0] , v[2, 0] = 4, -2, 5
        print(f'''u is {u}''' )
        print(f'''v is {v}''' )
        print(f'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
    def testb() -> None:
        """simple docstring"""
        import doctest
        doctest.testmod()
    testa()
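# Identity applied by `sherman_morrison` above (with `self` holding A^(-1)):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# which is why a zero `numerator_factor` means the rank-one update is not
# invertible.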
| 26 | 1 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approch( rows , cols , mat ) -> int:
    """simple docstring"""
    def update_area_of_max_square(row , col ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approch_with_dp( rows , cols , mat ) -> int:
    """simple docstring"""
    def update_area_of_max_square_using_dp_array(
        row , col , dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up( rows , cols , mat ) -> int:
    """simple docstring"""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization( rows , cols , mat ) -> int:
    """simple docstring"""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
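    # Note: despite the "area" in their names, these functions return the side
    # length of the largest all-ones square, so the demo above prints 2 for the
    # 2x2 all-ones matrix.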
| 26 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_( state_dict ) -> List[Any]:
    """simple docstring"""
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> List[str]:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None ) -> Union[str, Any]:
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
        if "gate" in key:
            key = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
        if "fc2" and "experts" not in key:  # the bare "fc2" is always truthy, as in the original script
            key = key.replace(""".fc2.""" , """.ffn.fc2.""" )
        if "fc1" and "experts" not in key:
            key = key.replace(""".fc1.""" , """.ffn.fc1.""" )
        if ".encoder_attn." in key:
            key = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
        if "final_layer_norm" in key:
            key = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME ) -> Dict:
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["""model"""]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["""shared.weight"""] = shared_weights["""decoder.embed_tokens.weight"""]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
        f.write(content )
    return metadata, index
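# Shape of the index written above (illustrative; with 128 experts plus the
# shared weights there are 129 shards):
#   {"metadata": {"total_size": ...},
#    "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00129-of-00129.bin", ...}}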
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 1 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__UpperCamelCase = "base_with_context"
def load_notes_encoder( weights , model ) -> int:
"""simple docstring"""
__snake_case : str = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
__snake_case : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
__snake_case : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__snake_case : str = ly_weight["""attention"""]
__snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder( weights , model ) -> str:
"""simple docstring"""
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
__snake_case : str = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
__snake_case : List[Any] = ly_weight["""attention"""]
__snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__snake_case : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder( weights , model ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
__snake_case : Any = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=_lowerCamelCase )
__snake_case : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F'''layers_{lyr_num}''']
__snake_case : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
__snake_case : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__snake_case : List[Any] = ly_weight["""self_attention"""]
__snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__snake_case : str = ly_weight["""MultiHeadDotProductAttention_0"""]
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__snake_case : str = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__snake_case : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__snake_case : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__snake_case : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
__snake_case : int = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main( args ) -> Optional[int]:
    """simple docstring"""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overwrites = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    gin_file = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overwrites )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
| 26 |
'''simple docstring'''
import cv2
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
__snake_case : List[Any] = 0.04 # Harris sensitivity constant k used in the response below
__snake_case : Tuple = self.window_size // 2
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
# Corner-response threshold; tune per image (higher keeps only stronger corners)
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 | 1 |
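For reference, a de-obfuscated sketch of the response the detector above computes: build the structure tensor from image gradients, sum it over a window, and score each pixel with R = det(M) - k * trace(M)^2. Pure NumPy, grayscale 2-D input assumed:

import numpy as np

def harris_response(img, k=0.04, window=3):
    # Structure tensor entries from image gradients (axis 0 = y, axis 1 = x).
    dy, dx = np.gradient(img.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    off = window // 2
    h, w = img.shape
    r = np.zeros((h, w), dtype=float)
    for y in range(off, h - off):
        for x in range(off, w - off):
            wxx = ixx[y - off : y + off + 1, x - off : x + off + 1].sum()
            wyy = iyy[y - off : y + off + 1, x - off : x + off + 1].sum()
            wxy = ixy[y - off : y + off + 1, x - off : x + off + 1].sum()
            det = wxx * wyy - wxy**2
            trace = wxx + wyy
            r[y, x] = det - k * trace**2
    return r

# Corners are local maxima of the response above a chosen threshold.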
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> None:
"""simple docstring"""
if start is None:
__snake_case : Optional[Any] = 0
if end is None:
__snake_case : Optional[Any] = len(_lowerCamelCase ) - 1
if start >= end:
return
__snake_case : Tuple = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
__snake_case , __snake_case : str = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 |
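Slowsort is the deliberately pessimal "multiply and surrender" strategy: recursively sort both halves, move the larger of the two middle maxima to the end, then re-sort everything except the last element. A readable sketch of the same routine with a usage check:

def slowsort(seq, start=0, end=None):
    # Sorts seq[start:end + 1] in place; intentionally inefficient.
    if end is None:
        end = len(seq) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(seq, start, mid)
    slowsort(seq, mid + 1, end)
    if seq[end] < seq[mid]:
        seq[mid], seq[end] = seq[end], seq[mid]
    slowsort(seq, start, end - 1)

data = [4, 1, 3, 2]
slowsort(data)
print(data)  # [1, 2, 3, 4]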
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 26 | 1 |
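Typical usage of the processor above, for orientation (the checkpoint name is illustrative and the call downloads weights on first use):

from PIL import Image
import numpy as np
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# Text goes through the tokenizer, images through the image processor;
# pixel_values are attached to the text encoding when both are given.
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']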
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("""Invalid Input""" )
__snake_case : List[str] = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
__snake_case : Union[str, Any] = current_sum - array[i] + array[i + k]
__snake_case : Optional[Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__UpperCamelCase = [randint(-1000, 1000) for i in range(100)]
__UpperCamelCase = randint(0, 110)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 26 |
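The function above is the standard O(n) sliding-window pattern: seed the window with the first k elements, then slide by adding the entering element and subtracting the leaving one (the naive recomputation would be O(n*k)). A clean sketch:

def max_sum_in_array(array, k):
    # O(n) sliding window over k consecutive elements.
    if k < 0 or len(array) < k:
        raise ValueError("Invalid Input")
    window = sum(array[:k])
    best = window
    for i in range(len(array) - k):
        window += array[i + k] - array[i]
        best = max(best, window)
    return best

print(max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4))  # 39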
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_s2s_model,
qa_s2s_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeq2SeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_s2s_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_s2s_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
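The dense retrieval path above reduces to max-inner-product search over precomputed embeddings. A self-contained FAISS sketch of that step (random vectors stand in for the real ReTriBERT embeddings; faiss-cpu is assumed installed):

import faiss
import numpy as np

dim, n_passages, top_k = 128, 1000, 5
rng = np.random.default_rng(0)
passage_reps = rng.standard_normal((n_passages, dim)).astype("float32")

index = faiss.IndexFlatIP(dim)  # exact max-inner-product search
index.add(passage_reps)

query = rng.standard_normal((1, dim)).astype("float32")
scores, ids = index.search(query, top_k)
print(ids[0])  # indices of the top-k passages by inner product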
'''simple docstring'''
from __future__ import annotations
import math
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
)
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__snake_case : Optional[int] = int(math.log2(len(_lowerCamelCase ) ) ) # tree height; assumes a power-of-two number of scores
print(F'''Optimal value : {minimax(0 , 0 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26 |
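The same game-tree evaluation as above, written with an integer tree height and explicit alternation between the maximizing and minimizing player:

import math

def minimax(depth, node_index, is_max, scores, height):
    # Leaves hold the scores; internal nodes alternate max and min.
    if depth == height:
        return scores[node_index]
    left = minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = int(math.log2(len(scores)))  # complete binary tree with 8 leaves
print(minimax(0, 0, True, scores, height))  # 65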
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 1 |
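The file above is an instance of the standard deprecation-shim pattern: keep the old class importable, warn on construction, and inherit everything from its replacement. A library-agnostic sketch:

import warnings

class NewProcessor:
    def run(self, x):
        return x * 2

class OldProcessor(NewProcessor):
    # Deprecated alias kept for backward compatibility.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldProcessor().run(3)  # still works, but warns on construction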
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[Any] = BertConfig.from_json_file(_lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
__snake_case : int = BertForPreTraining(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 26 |
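A quick sanity check after running the conversion is to rebuild the model from the same config and reload the saved weights (both paths below are placeholders):

import torch
from transformers import BertConfig, BertForPreTraining

config = BertConfig.from_json_file("bert_config.json")              # placeholder path
model = BertForPreTraining(config)
state_dict = torch.load("pytorch_model.bin", map_location="cpu")    # placeholder path
missing, unexpected = model.load_state_dict(state_dict, strict=False)
print(f"missing: {len(missing)}, unexpected: {len(unexpected)}")    # both 0 for a clean conversion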
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
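The conversion above is table-driven state-dict key renaming. The same idea on a plain dict, with an abbreviated pattern table (not the full Blenderbot list):

PATTERNS = [("q_lin", "q_proj"), ("k_lin", "k_proj"), ("out_lin", "out_proj")]

def rename_key(key):
    # Apply every (old, new) substitution in order.
    for old, new in PATTERNS:
        key = key.replace(old, new)
    return key

sd = {"encoder.q_lin.weight": 1, "encoder.out_lin.bias": 2}
renamed = {rename_key(k): v for k, v in sd.items()}
print(sorted(renamed))  # ['encoder.out_proj.bias', 'encoder.q_proj.weight']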
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Optional[int] = CodeGenTokenizer
lowercase__: str = CodeGenTokenizerFast
lowercase__: Dict = True
lowercase__: List[str] = {'''add_prefix_space''': True}
lowercase__: Optional[int] = False
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
__snake_case : Any = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
__snake_case : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__snake_case : Union[str, Any] = {"""unk_token""": """<unk>"""}
__snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def lowercase__ ( self : Dict , **__magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowercase__ ( self : Any , **__magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowercase__ ( self : int , __magic_name__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : int = """lower newer"""
__snake_case : Any = """lower newer"""
return input_text, output_text
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case : List[Any] = """lower newer"""
__snake_case : int = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__snake_case : List[str] = tokenizer.tokenize(__magic_name__ , add_prefix_space=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
__snake_case : str = tokens + [tokenizer.unk_token]
__snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__snake_case : str = self.get_tokenizer()
__snake_case : int = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
__snake_case : List[Any] = """lower newer"""
# Testing tokenization
__snake_case : int = tokenizer.tokenize(__magic_name__ , add_prefix_space=__magic_name__ )
__snake_case : Dict = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Testing conversion to ids without special tokens
__snake_case : str = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
__snake_case : Any = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Testing conversion to ids with special tokens
__snake_case : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__magic_name__ )
__snake_case : List[str] = tokenizer.encode(__magic_name__ , add_prefix_space=__magic_name__ )
__snake_case : Union[str, Any] = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# Testing the unknown token
__snake_case : Dict = tokens + [rust_tokenizer.unk_token]
__snake_case : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowercase__ ( self : Any , *__magic_name__ : str , **__magic_name__ : int ) -> int:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Tuple=15 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case : Any = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
# Simple input
__snake_case : Any = """This is a simple input"""
__snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case : int = ("""This is a simple input""", """This is a pair""")
__snake_case : Optional[int] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Simple input
self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Simple input
self.assertRaises(
__magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )
# Pair input
self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Pair input
self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Pair input
self.assertRaises(
__magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
__snake_case : Tuple = """This is a simple input"""
__snake_case : Union[str, Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
__snake_case : int = ("""This is a simple input""", """This is a pair""")
__snake_case : str = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
__snake_case : Any = tokenizer.pad_token_id
__snake_case : List[str] = tokenizer(__magic_name__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
__snake_case : List[Any] = tokenizer(__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , return_tensors="""np""" )
__snake_case : Optional[Any] = tokenizer(*__magic_name__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
__snake_case : Optional[Any] = tokenizer(__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def lowercase__ ( self : Any ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = """$$$"""
__snake_case : Union[str, Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__magic_name__ , add_bos_token=__magic_name__ )
__snake_case : Tuple = """This is a simple input"""
__snake_case : Dict = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case : int = tokenizer.bos_token_id
__snake_case : List[Any] = tokenizer(__magic_name__ )
__snake_case : str = tokenizer(__magic_name__ )
self.assertEqual(out_s.input_ids[0] , __magic_name__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__snake_case : str = tokenizer.decode(out_s.input_ids )
__snake_case : Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __magic_name__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
__snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
__snake_case : Any = """\nif len_a > len_b: result = a\nelse: result = b"""
__snake_case : Dict = tokenizer.encode(__magic_name__ )
__snake_case : Any = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
__snake_case : Optional[int] = tokenizer.decode(__magic_name__ , truncate_before_pattern=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
| 26 |
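The last slow test exercises `truncate_before_pattern`, which cuts decoded text at the first match of any stop regex. The underlying idea, sketched with plain `re` (a simplification, not the tokenizer's exact implementation):

import re

def truncate_before(text, patterns):
    # Cut at the earliest match of any stop pattern (multiline anchors apply).
    cut = len(text)
    for pat in patterns:
        m = re.search(pat, text, flags=re.MULTILINE)
        if m:
            cut = min(cut, m.start())
    return text[:cut]

generated = "if a > b:\n    result = a\n# trailing comment\nmore junk"
print(truncate_before(generated, [r"^#"]))  # keeps only the code before '# ...'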
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 1 |
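The release helper above is driven by (compiled regex, template) pairs. A minimal sketch of one such substitution on an in-memory string, mirroring the `init` entry:

import re

RE_VERSION = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)

def bump(source, version):
    # Rewrite the __version__ line in place, leaving the rest untouched.
    return RE_VERSION.sub(f'__version__ = "{version}"', source)

src = 'name = "pkg"\n__version__ = "4.30.0.dev0"\n'
print(bump(src, "4.30.0"))  # __version__ line rewritten to 4.30.0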
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 26 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self._create_example_records()
__snake_case : str = Dataset.from_list(__magic_name__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__magic_name__ ):
self.assertDictEqual(__magic_name__ , example_records[i] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self._create_example_records()
__snake_case : Dict = Dataset.from_list(__magic_name__ )
__snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : str ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
__snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowercase__ ( self : List[str] ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
__snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__snake_case : int = Dataset.from_list(__magic_name__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = Dataset.from_list([] )
self.assertEqual(len(__magic_name__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 26 | 1 |
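The behaviours these tests pin down, in one interactive snippet (requires the `datasets` library):

from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(dset.column_names)  # ['col_1'] -- the first record fixes the columns
print(dset[1])            # {'col_1': None} -- missing values become None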
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
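`_LazyModule` defers the heavy framework imports until an attribute is first accessed. The same effect can be approximated with PEP 562's module-level `__getattr__`; a sketch of that pattern with illustrative names:

# mypkg/__init__.py -- lazy attribute resolution via PEP 562 (Python 3.7+).
import importlib

_LAZY = {"RagModel": ".modeling_rag", "RagConfig": ".configuration_rag"}

def __getattr__(name):
    if name in _LAZY:
        # The submodule is imported only on first access to the attribute.
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")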
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNorm1d(4 ) # BatchNorm1d matches the (batch, features) activations here
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 | 1 |
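# A minimal, standalone sketch (ours, not part of the test file above) of the hook
# pattern the test exercises: offload a module's weights with AlignDevicesHook, run a
# forward pass, then restore the weights by removing the hook. It assumes `torch` and
# `accelerate` are installed and uses only the accelerate APIs shown in the test.
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_module

layer = nn.Linear(3, 4)
add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu", offload=True))
print(layer.weight.device)  # meta: weights are offloaded until the forward pass
output = layer(torch.randn(2, 3))  # the hook streams the weights in for execution
remove_hook_from_module(layer)
print(layer.weight.device)  # cpu: removing the hook restores the weights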
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """simple docstring"""
        # Build an iterable dataset when streaming
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """simple docstring"""
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """simple docstring"""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient: str,
        lines: bool,
        index: bool,
        **to_json_kwargs,
    ) -> int:
        """simple docstring"""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 26 |
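# A short usage sketch (ours, not part of the module above) of the public `datasets`
# entry points that wrap JsonDatasetWriter and JsonDatasetReader. The file name is
# illustrative.
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_json("out.jsonl")  # dispatches to JsonDatasetWriter; JSON Lines by default
reloaded = load_dataset("json", data_files="out.jsonl", split="train")  # JsonDatasetReader path
print(reloaded.column_names)  # ['col_1', 'col_2']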
'''simple docstring'''
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 26 | 1 |
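# A quick sanity check (ours, with the module above in scope): an all-zero
# heuristic turns `search` into plain uniform-cost search, and it still recovers
# a shortest path on an obstacle-free grid.
tiny_grid = [[0, 0], [0, 0]]
zero_heuristic = [[0, 0], [0, 0]]
tiny_path, _ = search(tiny_grid, [0, 0], [1, 1], 1, zero_heuristic)
print(tiny_path)  # [[0, 0], [0, 1], [1, 1]] with the DIRECTIONS order above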
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = tmp_path / """cache"""
__snake_case : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = tmp_path / """cache"""
__snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : Tuple = features.copy() if features else default_expected_features
__snake_case : int = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : Optional[int] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Dict = tmp_path / """cache"""
__snake_case : Any = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
__snake_case : Dict = features.copy() if features else default_expected_features
__snake_case : Optional[int] = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : Tuple = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
__snake_case : Tuple = features.copy()
__snake_case : List[str] = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : int = tmp_path / """cache"""
__snake_case : List[Any] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = tmp_path / """cache"""
__snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : int = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [jsonl_path]
__snake_case : Optional[int] = tmp_path / """cache"""
__snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """simple docstring"""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = tmp_path / """cache"""
__snake_case : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case : Optional[Any] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = tmp_path / """cache"""
__snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : int = features.copy() if features else default_expected_features
__snake_case : Any = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : int = JsonDatasetReader({"""train""": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
if split:
__snake_case : Union[str, Any] = {split: jsonl_path}
else:
__snake_case : Any = """train"""
__snake_case : Optional[int] = {"""train""": jsonl_path, """test""": jsonl_path}
__snake_case : List[Any] = tmp_path / """cache"""
__snake_case : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    """simple docstring"""
    return json.load(buffer)


def load_json_lines(buffer):
    """simple docstring"""
    return [json.loads(line) for line in buffer]
class _A :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ ).write()
buffer.seek(0 )
__snake_case : Any = load_json_function(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
assert isinstance(exported_content[0] , __magic_name__ )
assert len(__magic_name__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Tuple ) -> Tuple:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ , orient=__magic_name__ ).write()
buffer.seek(0 )
__snake_case : Any = load_json(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__magic_name__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__magic_name__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ , num_proc=2 ).write()
buffer.seek(0 )
__snake_case : int = load_json_function(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
assert isinstance(exported_content[0] , __magic_name__ )
assert len(__magic_name__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def lowercase__ ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ , orient=__magic_name__ , num_proc=2 ).write()
buffer.seek(0 )
__snake_case : List[str] = load_json(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__magic_name__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__magic_name__ ) == 10
def lowercase__ ( self : List[Any] , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(__magic_name__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / f'''test.json.{extension}'''
__snake_case : Union[str, Any] = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(__magic_name__ , __magic_name__ , compression=__magic_name__ ).write()
with fsspec.open(__magic_name__ , """rb""" , compression="""infer""" ) as f:
__snake_case : List[Any] = f.read()
with fsspec.open(__magic_name__ , """rb""" , compression="""infer""" ) as f:
__snake_case : Optional[Any] = f.read()
assert exported_content == original_content
| 26 |
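# A self-contained illustration (ours) of the fixture pattern the tests above rely
# on: write a small JSON Lines file of four rows, then load it back with `datasets`.
# The file name is illustrative.
import json

from datasets import load_dataset

rows = [{"col_1": str(i), "col_2": i, "col_3": float(i)} for i in range(4)]
with open("sample.jsonl", "w") as f:
    for row in rows:
        f.write(json.dumps(row) + "\n")

ds = load_dataset("json", data_files="sample.jsonl", split="train")
print(ds.num_rows, ds.column_names)  # 4 ['col_1', 'col_2', 'col_3']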
'''simple docstring'''
def remove_digit(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(number))
    # Build one copy of the digit list per position, then drop that position.
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
| 26 | 1 |
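# Worked examples (ours) for remove_digit above: drop one digit and keep the
# largest remaining number.
print(remove_digit(152))   # 52  (candidates: 52, 12, 15)
print(remove_digit(-290))  # 90  (abs() drops the sign; candidates: 90, 20, 29)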
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """simple docstring"""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """simple docstring"""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 26 |
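# The bounding-box arithmetic above, in isolation (our sketch). YOLO-format boxes
# store normalized centers, so a horizontal flip only mirrors x_center; class id,
# y_center, width and height are unchanged.
bbox = [0, 0.25, 0.40, 0.10, 0.20]  # [class, x_center, y_center, width, height]
flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
print(flipped)  # [0, 0.75, 0.4, 0.1, 0.2]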
'''simple docstring'''
from __future__ import annotations

import math


def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 26 | 1 |
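# A smaller trace (ours, with the module above in scope): four leaves give height
# log2(4) = 2.0, and the maximizing root picks the better of the two minimized pairs.
leaves = [3, 5, 2, 9]
print(minimax(0, 0, True, leaves, math.log(len(leaves), 2)))  # max(min(3,5), min(2,9)) = 3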
'''simple docstring'''
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """simple docstring"""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """simple docstring"""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """simple docstring"""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """simple docstring"""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 26 |
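# The two primitives above in isolation (our sketch). `evaluate` expects an item and
# target of equal length; `crossover` picks a random slice point, so the exact
# children depend on the seed.
import random

random.seed(0)
print(evaluate("hellp", "hello"))  # ('hellp', 4.0): four positions already match
child_1, child_2 = crossover("aaaa", "bbbb")
print(child_1, child_2)  # e.g. 'abbb' 'baaa', depending on the slice point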
'''simple docstring'''
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """simple docstring"""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 26 | 1 |
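# Usage (ours): slowsort sorts in place and, true to its "multiply and surrender"
# design, runs in worse than O(n^2) time, so keep inputs tiny.
data = [5, 2, 4, 1, 3]
slowsort(data)
print(data)  # [1, 2, 3, 4, 5]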
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
__UpperCamelCase = 5
__UpperCamelCase = 10
@require_sentencepiece
@require_tokenizers
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Optional[Any] = SpeechaTextTokenizer
lowercase__: Optional[int] = False
lowercase__: Union[str, Any] = True
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
super().setUp()
__snake_case : Tuple = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
__snake_case : int = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
__snake_case : int = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
__snake_case : Union[str, Any] = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
__snake_case : Dict = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
__snake_case : Any = """<pad>"""
__snake_case : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__magic_name__ ) , 10_01 )
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__snake_case : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_89, 50, 14, 1_74, 3_86] , )
__snake_case : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__snake_case : List[Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class _A ( unittest.TestCase ):
lowercase__: List[Any] = '''valhalla/s2t_mustc_multilinguial_medium'''
lowercase__: Union[str, Any] = '''C\'est trop cool'''
lowercase__: Union[str, Any] = '''Esto es genial'''
@classmethod
def lowercase__ ( cls : Optional[Any] ) -> int:
"""simple docstring"""
__snake_case : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
__snake_case : Tuple = [ES_CODE, 4, 16_01, 47, 76_47, 2]
__snake_case : Any = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
__snake_case : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = """fr"""
__snake_case : int = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = """fr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__snake_case : Any = """es"""
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 26 |
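# A minimal sketch (ours) of exercising the tokenizer targeted by the tests above.
# It assumes `transformers` and `sentencepiece` are installed and that the public
# checkpoint is reachable over the network.
from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-mustc-en-de-st")
ids = tokenizer("This is a test").input_ids
print(tokenizer.decode(ids, skip_special_tokens=True))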
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow

logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """simple docstring"""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doc(self):
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doc(self):
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doc(self):
        """simple docstring"""
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        """simple docstring"""
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        """simple docstring"""
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 26 | 1 |
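# The core mechanism used above, reduced to a runnable sketch (ours): collect a
# module's doctests into a unittest suite and assert that nothing failed.
import doctest
import unittest


def add(a: int, b: int) -> int:
    """
    >>> add(2, 3)
    5
    """
    return a + b


if __name__ == "__main__":
    suite = doctest.DocTestSuite()  # collects the doctest in add() from this module
    result = unittest.TextTestRunner().run(suite)
    assert len(result.failures) == 0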
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
lowercase__: List[Any] = '''linear'''
lowercase__: Optional[int] = '''cosine'''
lowercase__: Tuple = '''cosine_with_restarts'''
lowercase__: str = '''polynomial'''
lowercase__: Union[str, Any] = '''constant'''
lowercase__: Dict = '''constant_with_warmup'''
lowercase__: List[str] = '''piecewise_constant'''
def _a ( _lowerCamelCase , _lowerCamelCase = -1 ) -> str:
"""simple docstring"""
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = -1 ) -> List[str]:
"""simple docstring"""
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = -1 ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = {}
__snake_case : str = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__snake_case , __snake_case : str = rule_str.split(""":""" )
__snake_case : Optional[int] = int(_lowerCamelCase )
__snake_case : Tuple = float(_lowerCamelCase )
__snake_case : Optional[Any] = value
__snake_case : Optional[Any] = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase , _lowerCamelCase ):
def rule_func(_lowerCamelCase ) -> float:
__snake_case : str = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__snake_case : Any = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=-1 ) -> List[str]:
"""simple docstring"""
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0.5 , _lowerCamelCase = -1 ) -> List[str]:
"""simple docstring"""
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__snake_case : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 ) -> str:
"""simple docstring"""
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__snake_case : str = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1E-7 , _lowerCamelCase=1.0 , _lowerCamelCase=-1 ) -> Any:
"""simple docstring"""
__snake_case : int = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__snake_case : Tuple = lr_init - lr_end
__snake_case : str = num_training_steps - num_warmup_steps
__snake_case : str = 1 - (current_step - num_warmup_steps) / decay_steps
__snake_case : Union[str, Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__UpperCamelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 1.0 , _lowerCamelCase = -1 , ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = SchedulerType(_lowerCamelCase )
__snake_case : Any = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
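# Editorial usage sketch (hedged): assuming the dispatcher above is exposed under
# its conventional name `get_scheduler`, a typical call looks like
#     scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000)
# SchedulerType(...) accepts either an enum member or its string value, so both
# SchedulerType.COSINE and "cosine" select get_cosine_schedule_with_warmup.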
| 26 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
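        # the decoded Whisper transcription now acts as the text prompt for the
        # diffusion process below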
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
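        # 0.18215 is the latent scaling factor of the Stable Diffusion VAE; dividing
        # by it rescales the latents to the range the decoder expects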
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase , _lowerCamelCase ) -> list[str]:
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_lowerCamelCase ) - ngram_size + 1 )]
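# Editorial example (hedged): with sentence = "abcde" and ngram_size = 2 the list
# comprehension above yields the character bigrams ["ab", "bc", "cd", "de"].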
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 1 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
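# Editorial note (hedged): the memmap above stores one 128-d embedding per ELI5
# training question; faiss.IndexFlatIP performs exact maximum-inner-product search
# over them, so the nearest-question lookup used at query time is brute force.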
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
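# Editorial example (hedged): for a name such as "mobilenet_v1_1.0_224" the regex
# above captures a depth multiplier of 1.0 and an input resolution of 224.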
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 26 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
__UpperCamelCase = False
__UpperCamelCase = False
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
return TrainCommand(_lowerCamelCase )
class _A ( __lowercase ):
@staticmethod
def lowercase__ ( __magic_name__ : ArgumentParser ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=__magic_name__ , required=__magic_name__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=__magic_name__ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=__magic_name__ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=__magic_name__ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=__magic_name__ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=__magic_name__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=__magic_name__ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=__magic_name__ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=__magic_name__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=__magic_name__ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=__magic_name__ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=__magic_name__ , default=3E-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=__magic_name__ , default=1E-08 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=__magic_name__ )
def __init__( self : Dict , __magic_name__ : Namespace ) -> List[Any]:
"""simple docstring"""
__snake_case : List[str] = logging.get_logger("""transformers-cli/training""" )
__snake_case : Dict = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=__magic_name__ )
__snake_case : int = args.output
__snake_case : Optional[int] = args.column_label
__snake_case : List[str] = args.column_text
__snake_case : Union[str, Any] = args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
__snake_case : List[Any] = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''' )
__snake_case : Optional[int] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : Optional[int] = None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
__snake_case : List[str] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : int = args.validation_split
__snake_case : Tuple = args.train_batch_size
__snake_case : List[str] = args.valid_batch_size
__snake_case : int = args.learning_rate
__snake_case : Any = args.adam_epsilon
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
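# Editorial note (hedged): once registered, this command is reachable from the CLI
# (e.g. `transformers-cli train --train_data train.csv --task text_classification`);
# the torch path above is deliberately unimplemented, while the tf path drives
# self.pipeline.fit(...) and saves the result with save_pretrained.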
| 26 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 1 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 26 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
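# --- Editorial illustration (hedged): not part of the metric module. ---
# For binary labels the coefficient computed above reduces to the closed form
#     MCC = (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN))
def _binary_mcc_sketch(tp, tn, fp, fn):
    import math

    denominator = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0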
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
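# Editorial note (hedged): with the _LazyModule in place, a statement such as
# `from ...speech_to_text import Speech2TextModel` resolves the heavy torch-backed
# submodule only on first attribute access; the try/except probes above merely
# decide which names get registered in _import_structure.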
| 26 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
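# Editor's sketch mirroring the test above: extract accepts a local archive
# path and returns a path under the "extracted" cache subdir. The archive
# name is a hypothetical placeholder.
from datasets import DownloadManager

sketch_dl_manager = DownloadManager()
sketch_extracted = sketch_dl_manager.extract("archive.tar.xz")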
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
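# Editor's sketch of iter_archive, as exercised by the two tests above; it
# yields (member_path, binary file-like) pairs. "data.tar" is a hypothetical
# placeholder archive.
from datasets import DownloadManager

sketch_dl_manager = DownloadManager()
for member_path, member_file in sketch_dl_manager.iter_archive("data.tar"):
    first_line = member_file.readline()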
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
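# Editor's sketch of iter_files, matching the test above: it yields the
# individual file paths under a given root. "./data" is a hypothetical
# placeholder directory.
from datasets import DownloadManager

sketch_dl_manager = DownloadManager()
for file_path in sketch_dl_manager.iter_files("./data"):
    print(file_path)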
| 26 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class _A ( __lowercase ):
lowercase__: Any = '''bloom'''
lowercase__: List[str] = ['''past_key_values''']
lowercase__: Any = {
'''num_hidden_layers''': '''n_layer''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : str , __magic_name__ : str=25_08_80 , __magic_name__ : Optional[Any]=64 , __magic_name__ : Dict=2 , __magic_name__ : Optional[int]=8 , __magic_name__ : Optional[int]=1E-5 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=1 , __magic_name__ : int=2 , __magic_name__ : List[str]=False , __magic_name__ : str=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : List[Any]=1 , __magic_name__ : Any=False , **__magic_name__ : List[Any] , ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = vocab_size
# Backward compatibility with n_embed kwarg
__snake_case : List[str] = kwargs.pop("""n_embed""" , __magic_name__ )
__snake_case : Union[str, Any] = hidden_size if n_embed is None else n_embed
__snake_case : Union[str, Any] = n_layer
__snake_case : Dict = n_head
__snake_case : Dict = layer_norm_epsilon
__snake_case : int = initializer_range
__snake_case : Optional[int] = use_cache
__snake_case : List[str] = pretraining_tp
__snake_case : int = apply_residual_connection_post_layernorm
__snake_case : str = hidden_dropout
__snake_case : Tuple = attention_dropout
__snake_case : int = bos_token_id
__snake_case : Any = eos_token_id
__snake_case : Union[str, Any] = slow_but_exact
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
class _A ( __lowercase ):
lowercase__: Optional[int] = version.parse('''1.12''' )
def __init__( self : Optional[int] , __magic_name__ : PretrainedConfig , __magic_name__ : str = "default" , __magic_name__ : List[PatchingSpec] = None , __magic_name__ : bool = False , ) -> Optional[int]:
"""simple docstring"""
super().__init__(__magic_name__ , task=__magic_name__ , patching_specs=__magic_name__ , use_past=__magic_name__ )
if not getattr(self._config , """pad_token_id""" , __magic_name__ ):
# TODO: how to do that better?
__snake_case : str = 0
@property
def lowercase__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__snake_case : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" , inverted_values_shape=__magic_name__ )
__snake_case : List[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__snake_case : Any = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return self._config.n_head
@property
def lowercase__ ( self : Optional[int] ) -> float:
"""simple docstring"""
return 1E-3
def lowercase__ ( self : List[Any] , __magic_name__ : "PreTrainedTokenizer" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__snake_case : int = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        # We need to order the inputs in the way they appear in the forward()
__snake_case : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__snake_case , __snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__snake_case : Tuple = seqlen + 2
__snake_case : List[str] = self._config.hidden_size // self.num_attention_heads
__snake_case : Any = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__snake_case : Optional[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__snake_case : Dict = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
__snake_case : Union[str, Any] = common_inputs["""attention_mask"""]
if self.use_past:
__snake_case : Any = ordered_inputs["""attention_mask"""].dtype
__snake_case : int = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def lowercase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return 13
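# Editor's sketch, assuming the two classes above correspond to transformers'
# BloomConfig and its ONNX config; kwarg names follow the attribute_map
# declared above (n_layer / n_head).
from transformers import BloomConfig

sketch_config = BloomConfig(vocab_size=1000, hidden_size=64, n_layer=2, n_head=4)
print(sketch_config.num_hidden_layers, sketch_config.num_attention_heads)  # 2 4 via attribute_map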
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase = 100 ) -> int:
"""simple docstring"""
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
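# Editor's sanity check: brute force agrees with the closed forms used above,
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; for n = 10 the difference
# between the square of the sum and the sum of the squares is 2640.
assert sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640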
| 26 | 1 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__UpperCamelCase = get_tests_dir("fixtures/vocab.json")
__UpperCamelCase = get_tests_dir("fixtures")
class _A ( unittest.TestCase ):
lowercase__: str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[int] = 0
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : int = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = WavaVecaConfig()
__snake_case : List[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
__snake_case : Optional[int] = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
copyfile(__magic_name__ , os.path.join(__magic_name__ , """vocab.json""" ) )
__snake_case : Optional[Any] = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Any = WavaVecaFeatureExtractor()
__snake_case : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
__snake_case : Union[str, Any] = WavaVecaProcessor(__magic_name__ , __magic_name__ )
# save in new folder
processor.save_pretrained(__magic_name__ )
# drop `processor_class` in tokenizer
with open(os.path.join(__magic_name__ , __magic_name__ ) , """r""" ) as f:
__snake_case : Optional[Any] = json.load(__magic_name__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
f.write(json.dumps(__magic_name__ ) )
__snake_case : List[str] = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[int] = WavaVecaFeatureExtractor()
__snake_case : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
__snake_case : Optional[Any] = WavaVecaProcessor(__magic_name__ , __magic_name__ )
# save in new folder
processor.save_pretrained(__magic_name__ )
# drop `processor_class` in feature extractor
with open(os.path.join(__magic_name__ , __magic_name__ ) , """r""" ) as f:
__snake_case : str = json.load(__magic_name__ )
config_dict.pop("""processor_class""" )
with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
f.write(json.dumps(__magic_name__ ) )
__snake_case : Union[str, Any] = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__magic_name__ )
# copy relevant files
copyfile(__magic_name__ , os.path.join(__magic_name__ , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as f:
f.write("""{}""" )
__snake_case : int = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
with self.assertRaises(__magic_name__ ):
__snake_case : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__magic_name__ ):
__snake_case : Union[str, Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
__snake_case : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
__snake_case : Tuple = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
__snake_case : Any = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__snake_case : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ , use_fast=__magic_name__ )
__snake_case : Optional[int] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoFeatureExtractor.register(__magic_name__ , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
AutoProcessor.register(__magic_name__ , __magic_name__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoProcessor.register(__magic_name__ , __magic_name__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case : Union[str, Any] = CustomFeatureExtractor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : str = os.path.join(__magic_name__ , """vocab.txt""" )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
__snake_case : int = CustomTokenizer(__magic_name__ )
__snake_case : str = CustomProcessor(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__magic_name__ )
__snake_case : Dict = AutoProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
class _A ( __lowercase ):
lowercase__: Optional[Any] = False
class _A ( __lowercase ):
lowercase__: int = False
class _A ( __lowercase ):
lowercase__: List[Any] = '''AutoFeatureExtractor'''
lowercase__: Tuple = '''AutoTokenizer'''
lowercase__: Union[str, Any] = False
try:
AutoConfig.register("""custom""" , __magic_name__ )
AutoFeatureExtractor.register(__magic_name__ , __magic_name__ )
AutoTokenizer.register(__magic_name__ , slow_tokenizer_class=__magic_name__ )
AutoProcessor.register(__magic_name__ , __magic_name__ )
# If remote code is not set, the default is to use local classes.
__snake_case : Any = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__snake_case : Optional[Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__snake_case : List[Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__magic_name__ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
__snake_case : int = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _A ( unittest.TestCase ):
lowercase__: Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowercase__ ( cls : str ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def lowercase__ ( cls : Dict ) -> str:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
__snake_case : int = WavaVecaProcessor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__magic_name__ , """test-processor""" ) , push_to_hub=__magic_name__ , use_auth_token=self._token )
__snake_case : Union[str, Any] = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(new_processor.feature_extractor , __magic_name__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = WavaVecaProcessor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__magic_name__ , """test-processor-org""" ) , push_to_hub=__magic_name__ , use_auth_token=self._token , organization="""valid_org""" , )
__snake_case : Tuple = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__magic_name__ , getattr(new_processor.feature_extractor , __magic_name__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__snake_case : Optional[int] = CustomFeatureExtractor.from_pretrained(__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Optional[Any] = os.path.join(__magic_name__ , """vocab.txt""" )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
__snake_case : Union[str, Any] = CustomTokenizer(__magic_name__ )
__snake_case : Optional[int] = CustomProcessor(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
__snake_case : str = Repository(__magic_name__ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__magic_name__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) ) as f:
__snake_case : Dict = json.load(__magic_name__ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__magic_name__ , """custom_processing.py""" ) ) )
repo.push_to_hub()
__snake_case : List[str] = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
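# Editor's sketch of the save/reload round trip these tests exercise, using
# the real Auto API; assumes network access for the initial download.
import tempfile
from transformers import AutoProcessor

sketch_processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
with tempfile.TemporaryDirectory() as sketch_dir:
    sketch_processor.save_pretrained(sketch_dir)
    sketch_reloaded = AutoProcessor.from_pretrained(sketch_dir)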
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = row, column
__snake_case : Dict = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
__snake_case : Dict = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__snake_case : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__snake_case : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
__snake_case : str = f'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ : list[float] ) -> str:
nonlocal string_format_identifier
__snake_case : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self )
def lowercase__ ( self : Dict , __magic_name__ : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , __magic_name__ : tuple[int, int] ) -> Any:
"""simple docstring"""
        assert self.validate_indices(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None:
"""simple docstring"""
        assert self.validate_indices(__magic_name__ )
__snake_case : Optional[int] = value
def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
__snake_case : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) -> Matrix:
"""simple docstring"""
__snake_case : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = -self[r, c]
return result
def __sub__( self : Optional[int] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : List[Any] , __magic_name__ : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
__snake_case : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : Tuple = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
__snake_case : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__snake_case : Optional[int] = f'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
__snake_case : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : str = self[r, c]
return result
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__snake_case : List[str] = v.transpose()
__snake_case : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
__snake_case : Any = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__snake_case : Dict = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Union[str, Any] = 1, 2, -3
__snake_case : str = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}''' )
def _a ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
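# Editor's sketch: a NumPy check of the Sherman-Morrison identity the class
# above implements, independent of the obfuscated Matrix class:
#   (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u)
import numpy as np

sketch_A = np.eye(3)
sketch_u = np.array([[1.0], [2.0], [-3.0]])
sketch_v = np.array([[4.0], [-2.0], [5.0]])
sketch_A_inv = np.linalg.inv(sketch_A)
sketch_lhs = np.linalg.inv(sketch_A + sketch_u @ sketch_v.T)
sketch_rhs = sketch_A_inv - (sketch_A_inv @ sketch_u @ sketch_v.T @ sketch_A_inv) / (
    1.0 + (sketch_v.T @ sketch_A_inv @ sketch_u).item()
)
assert np.allclose(sketch_lhs, sketch_rhs)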
| 26 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _A :
lowercase__: int = None
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : Dict = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __magic_name__ )
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__snake_case : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Optional[int] = os.path.join(__magic_name__ , """feat_extract.json""" )
feat_extract_first.to_json_file(__magic_name__ )
__snake_case : List[str] = self.feature_extraction_class.from_json_file(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowercase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Tuple = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
__snake_case : Union[str, Any] = self.feature_extraction_class.from_pretrained(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[str] = self.feature_extraction_class()
self.assertIsNotNone(__magic_name__ )
| 26 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
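# Editor's sketch (hypothetical helper, not in the original script): a
# condensed, self-contained version of the expert/gate renames above so the
# fairseq -> HF key mapping can be checked in isolation.
def sketch_rename_key(key, expert_idx=None):
    if "moe_layer.experts." in key:
        if expert_idx is not None:
            key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
        else:
            key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
    if "gate" in key:
        key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
    return key

assert sketch_rename_key("layers.0.moe_layer.experts.0.fc1.weight", 3) == "layers.0.ffn.experts.expert_3.fc1.weight"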
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved in the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class _A :
def __init__( self : Optional[int] , __magic_name__ : int | None = None ) -> Any:
"""simple docstring"""
__snake_case : str = value
__snake_case : Node | None = None # Added in order to delete a node easier
__snake_case : Node | None = None
__snake_case : Node | None = None
def __repr__( self : Any ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class _A :
def __init__( self : int , __magic_name__ : Node | None = None ) -> str:
"""simple docstring"""
__snake_case : Tuple = root
def __str__( self : List[str] ) -> str:
"""simple docstring"""
return str(self.root )
def lowercase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
__snake_case : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
__snake_case : Any = new_children
else:
__snake_case : Optional[Any] = new_children
else:
__snake_case : Dict = new_children
def lowercase__ ( self : Optional[Any] , __magic_name__ : Node ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase__ ( self : Optional[int] ) -> bool:
"""simple docstring"""
return self.root is None
def lowercase__ ( self : Dict , __magic_name__ : Optional[int] ) -> None:
"""simple docstring"""
__snake_case : Any = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
__snake_case : List[Any] = new_node # set its root
else: # Tree is not empty
__snake_case : Union[str, Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__snake_case : Optional[int] = new_node # We insert the new node in a leaf
break
else:
__snake_case : Optional[int] = parent_node.left
else:
if parent_node.right is None:
__snake_case : Union[str, Any] = new_node
break
else:
__snake_case : str = parent_node.right
__snake_case : List[Any] = parent_node
def lowercase__ ( self : Dict , *__magic_name__ : Union[str, Any] ) -> None:
"""simple docstring"""
for value in values:
self.__insert(__magic_name__ )
def lowercase__ ( self : int , __magic_name__ : Tuple ) -> Node | None:
"""simple docstring"""
if self.empty():
            raise IndexError("""Warning: Tree is empty! Please insert values before searching.""" )
else:
__snake_case : str = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__snake_case : Dict = node.left if value < node.value else node.right
return node
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
__snake_case : List[str] = self.root
if not self.empty():
while node.right is not None:
__snake_case : Optional[Any] = node.right
return node
def lowercase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None:
"""simple docstring"""
if node is None:
__snake_case : List[str] = self.root
if self.root is None:
return None
if not self.empty():
__snake_case : Dict = self.root
while node.left is not None:
__snake_case : Optional[Any] = node.left
return node
def lowercase__ ( self : str , __magic_name__ : int ) -> None:
"""simple docstring"""
__snake_case : Dict = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
__snake_case : Tuple = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
__snake_case : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase__ ( self : int , __magic_name__ : Optional[int]=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase__ ( self : str , __magic_name__ : list , __magic_name__ : Node | None ) -> None:
"""simple docstring"""
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : Node ) -> int:
"""simple docstring"""
__snake_case : list[int] = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
def _a ( _lowerCamelCase ) -> list[Node]:
"""simple docstring"""
__snake_case : int = []
if curr_node is not None:
__snake_case : List[Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Optional[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
__snake_case : str = BinarySearchTree()
for i in testlist:
t.insert(_lowerCamelCase )
# Prints all the elements of the list in order traversal
print(_lowerCamelCase )
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn't exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn't exist""" )
if not t.empty():
print("""Max Value: """ , t.get_max().value ) # type: ignore
print("""Min Value: """ , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_lowerCamelCase )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
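# Editor's sketch: a minimal self-contained version of the insert/search
# logic above, kept separate from the obfuscated class.
class SketchNode:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None

def sketch_insert(root, value):
    if root is None:
        return SketchNode(value)
    if value < root.value:
        root.left = sketch_insert(root.left, value)
    else:
        root.right = sketch_insert(root.right, value)
    return root

def sketch_search(root, value):
    while root is not None and root.value != value:
        root = root.left if value < root.value else root.right
    return root

sketch_root = None
for sketch_value in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    sketch_root = sketch_insert(sketch_root, sketch_value)
assert sketch_search(sketch_root, 6) is not None and sketch_search(sketch_root, -1) is None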
| 26 |
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
        __snake_case : List[Any] = self.k  # use the k validated in __init__ rather than a hard-coded 0.04
__snake_case : Tuple = self.window_size // 2
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
                # corner-response threshold; can be tuned for more or fewer corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
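# Editor's sketch for comparison: OpenCV ships a vectorized Harris detector;
# this assumes "path_to_image" is a valid grayscale image path.
import cv2
import numpy as np

sketch_gray = cv2.imread("path_to_image", 0).astype(np.float32)
sketch_response = cv2.cornerHarris(sketch_gray, blockSize=3, ksize=3, k=0.04)  # same window size and k as above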
| 26 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _A ( __lowercase ):
lowercase__: Union[str, Any] = '''longformer'''
def __init__( self : List[str] , __magic_name__ : Union[List[int], int] = 5_12 , __magic_name__ : int = 2 , __magic_name__ : int = 1 , __magic_name__ : int = 0 , __magic_name__ : int = 2 , __magic_name__ : int = 3_05_22 , __magic_name__ : int = 7_68 , __magic_name__ : int = 12 , __magic_name__ : int = 12 , __magic_name__ : int = 30_72 , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 5_12 , __magic_name__ : int = 2 , __magic_name__ : float = 0.02 , __magic_name__ : float = 1E-12 , __magic_name__ : bool = False , **__magic_name__ : str , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
__snake_case : int = attention_window
__snake_case : Optional[int] = sep_token_id
__snake_case : List[Any] = bos_token_id
__snake_case : Tuple = eos_token_id
__snake_case : List[str] = vocab_size
__snake_case : Tuple = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : Any = intermediate_size
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : int = initializer_range
__snake_case : Optional[int] = layer_norm_eps
__snake_case : Optional[Any] = onnx_export
class _A ( __lowercase ):
def __init__( self : Dict , __magic_name__ : "PretrainedConfig" , __magic_name__ : str = "default" , __magic_name__ : "List[PatchingSpec]" = None ) -> Any:
"""simple docstring"""
super().__init__(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : Optional[int] = True
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__snake_case : int = super().outputs
if self.task == "default":
__snake_case : Tuple = {0: """batch"""}
return outputs
@property
def lowercase__ ( self : int ) -> float:
"""simple docstring"""
return 1E-4
@property
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
def lowercase__ ( self : Dict , __magic_name__ : "PreTrainedTokenizerBase" , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__snake_case : int = super().generate_dummy_inputs(
preprocessor=__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
__snake_case : List[str] = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
__snake_case : Optional[Any] = 1
return inputs
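# Editor's sketch, assuming the classes above correspond to transformers'
# LongformerConfig and its ONNX config; attention_window may be one int or a
# per-layer list, as the Union type above indicates.
from transformers import LongformerConfig

sketch_config = LongformerConfig(attention_window=[256] * 12, num_hidden_layers=12)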
| 26 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
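# Editor's sketch of the processor in use, with the real CLIP checkpoint
# name; "photo.jpg" is a hypothetical placeholder image.
from PIL import Image
from transformers import CLIPProcessor

sketch_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
sketch_image = Image.open("photo.jpg")
sketch_inputs = sketch_processor(
    text=["a photo of a cat"], images=sketch_image, return_tensors="pt", padding=True
)
# sketch_inputs now holds input_ids / attention_mask from the tokenizer and
# pixel_values from the image processor, merged as in __call__ above.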
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase = 10 , _lowerCamelCase = 1000 , _lowerCamelCase = True ) -> int:
"""simple docstring"""
assert (
isinstance(_lowerCamelCase , _lowerCamelCase )
and isinstance(_lowerCamelCase , _lowerCamelCase )
and isinstance(_lowerCamelCase , _lowerCamelCase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
return min_val if option else max_val
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
return int((number_a + number_a) / 2 )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> None:
"""simple docstring"""
assert (
isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("""argument value for lower and higher must be(lower > higher)""" )
if not lower < to_guess < higher:
raise ValueError(
"""guess value must be within the range of lower and higher value""" )
def answer(_lowerCamelCase ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
__snake_case : Any = lower
__snake_case : List[Any] = higher
__snake_case : Tuple = []
while True:
__snake_case : List[str] = get_avg(_lowerCamelCase , _lowerCamelCase )
last_numbers.append(_lowerCamelCase )
if answer(_lowerCamelCase ) == "low":
__snake_case : Union[str, Any] = number
elif answer(_lowerCamelCase ) == "high":
__snake_case : Dict = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def _a ( ) -> None:
"""simple docstring"""
__snake_case : List[Any] = int(input("""Enter lower value : """ ).strip() )
__snake_case : Tuple = int(input("""Enter high value : """ ).strip() )
__snake_case : Tuple = int(input("""Enter value to guess : """ ).strip() )
guess_the_number(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
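# Editor's note: the midpoint search above halves [lower, higher] each round,
# so the worst case is about log2(higher - lower) guesses.
import math

print(math.ceil(math.log2(1000)))  # a 0..1000 range needs at most ~10 guesses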
| 26 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
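# A small self-contained sketch of the dense-retrieval pattern used above: add
# passage embeddings to an inner-product FAISS index, then take the top-k rows
# for a query embedding. Shapes are illustrative only; the real index holds the
# 128-d RetriBERT representations loaded earlier.
def _dense_search_sketch():
    rng = np.random.default_rng(0)
    reps = rng.standard_normal((1000, 128)).astype("float32")  # stand-in passage reps
    index = faiss.IndexFlatIP(128)
    index.add(reps)
    query = rng.standard_normal((1, 128)).astype("float32")
    scores, ids = index.search(query, 10)  # top-10 passages by inner product
    return ids[0]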
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
lowercase__: Any = '''encoder-decoder'''
lowercase__: str = True
def __init__( self : str , **__magic_name__ : int ) -> str:
"""simple docstring"""
super().__init__(**__magic_name__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__snake_case : Any = kwargs.pop("""encoder""" )
__snake_case : Union[str, Any] = encoder_config.pop("""model_type""" )
__snake_case : Optional[int] = kwargs.pop("""decoder""" )
__snake_case : Dict = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__snake_case : Optional[Any] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
__snake_case : Optional[Any] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
__snake_case : List[str] = True
@classmethod
def lowercase__ ( cls : Optional[Any] , __magic_name__ : PretrainedConfig , __magic_name__ : PretrainedConfig , **__magic_name__ : Dict ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
__snake_case : List[Any] = True
__snake_case : Any = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
__snake_case : int = self.encoder.to_dict()
__snake_case : List[str] = self.decoder.to_dict()
__snake_case : Tuple = self.__class__.model_type
return output
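# A hedged round-trip sketch, assuming the class above mirrors transformers'
# EncoderDecoderConfig:
#
#     from transformers import BertConfig, EncoderDecoderConfig
#
#     cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention
#     nested = cfg.to_dict()  # carries "encoder" and "decoder" sub-dicts plus the model_type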
| 26 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
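# Migration sketch for downstream code, following the deprecation notice above
# (checkpoint name illustrative):
#
#     from transformers import OwlViTImageProcessor
#
#     image_processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")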
| 26 | 1 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1024 ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : int = [], []
__snake_case : List[str] = list(zip(_lowerCamelCase , _lowerCamelCase ) )
__snake_case , __snake_case : Union[str, Any] = sorted_examples[0]
def is_too_big(_lowerCamelCase ):
return tok(_lowerCamelCase , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__snake_case : Union[str, Any] = new_src + """ """ + src
__snake_case : List[Any] = new_tgt + """ """ + tgt
if is_too_big(_lowerCamelCase ) or is_too_big(_lowerCamelCase ): # can't fit, finalize example
finished_src.append(_lowerCamelCase )
finished_tgt.append(_lowerCamelCase )
__snake_case , __snake_case : Optional[int] = src, tgt
else: # can fit, keep adding
__snake_case , __snake_case : Dict = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_lowerCamelCase )
finished_tgt.append(_lowerCamelCase )
return finished_src, finished_tgt
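# A hedged sketch of the packing behaviour with toy data: consecutive pairs are
# concatenated until the tokenized source or target would exceed max_tokens, so
# with a generous budget the two pairs below collapse into one example.
#
#     tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
#     packed_src, packed_tgt = pack_examples(
#         tok, ["first source.", "second source."], ["first target.", "second target."], max_tokens=1024
#     )
#     assert len(packed_src) == len(packed_tgt) == 1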
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = Path(_lowerCamelCase )
save_path.mkdir(exist_ok=_lowerCamelCase )
for split in ["train"]:
__snake_case , __snake_case : int = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
__snake_case : Union[str, Any] = [x.rstrip() for x in Path(_lowerCamelCase ).open().readlines()]
__snake_case : str = [x.rstrip() for x in Path(_lowerCamelCase ).open().readlines()]
__snake_case , __snake_case : Optional[Any] = pack_examples(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
print(F'''packed {split} split from {len(_lowerCamelCase )} examples -> {len(_lowerCamelCase )}.''' )
Path(save_path / F'''{split}.source''' ).open("""w""" ).write("""\n""".join(_lowerCamelCase ) )
Path(save_path / F'''{split}.target''' ).open("""w""" ).write("""\n""".join(_lowerCamelCase ) )
for split in ["val", "test"]:
__snake_case , __snake_case : Any = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(_lowerCamelCase , save_path / F'''{split}.source''' )
shutil.copyfile(_lowerCamelCase , save_path / F'''{split}.target''' )
def _a ( ) -> int:
"""simple docstring"""
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=_lowerCamelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=_lowerCamelCase , default=128 )
parser.add_argument("""--data_dir""" , type=_lowerCamelCase )
parser.add_argument("""--save_path""" , type=_lowerCamelCase )
__snake_case : int = parser.parse_args()
__snake_case : List[str] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 26 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
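# Intended behaviour sketch (hedged: it assumes each replace above rebinds k,
# as in the original ParlAI -> HF conversion script):
#
#     rename_state_dict_key("embeddings.weight")
#     # -> "shared.weight"
#     rename_state_dict_key("encoder.attention.q_lin.weight")
#     # -> "encoder.self_attn.q_proj.weight"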
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase = 400_0000 ) -> int:
"""simple docstring"""
__snake_case : str = [0, 1]
__snake_case : Optional[int] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
__snake_case : Tuple = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
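# Hedged sketch of the pattern machinery above: the "init" regex captures the
# current version string and its template swaps in a replacement (version
# numbers illustrative).
_pat, _tmpl = REPLACE_PATTERNS["init"]
assert _pat.search('__version__ = "4.21.0.dev0"\n').groups()[0] == "4.21.0.dev0"
assert _pat.sub(_tmpl.replace("VERSION", "4.21.0"), '__version__ = "4.21.0.dev0"\n') == '__version__ = "4.21.0"\n'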
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> float:
"""simple docstring"""
__snake_case : Tuple = 0
while len(_lowerCamelCase ) > 1:
__snake_case : List[str] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
__snake_case : Optional[Any] = files.index(min(_lowerCamelCase ) )
temp += files[min_index]
files.pop(_lowerCamelCase )
files.append(_lowerCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
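# Worked example (hedged; assuming the greedy routine above is exposed as
# optimal_merge_pattern and takes a list of file sizes):
#
#     optimal_merge_pattern([2, 3, 4])  # merges 2+3 (cost 5), then 5+4 (cost 9) -> 14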
| 26 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self._create_example_records()
__snake_case : str = Dataset.from_list(__magic_name__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__magic_name__ ):
self.assertDictEqual(__magic_name__ , example_records[i] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self._create_example_records()
__snake_case : Dict = Dataset.from_list(__magic_name__ )
__snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : str ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
__snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowercase__ ( self : List[str] ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
__snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__snake_case : int = Dataset.from_list(__magic_name__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = Dataset.from_list([] )
self.assertEqual(len(__magic_name__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
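# The behaviour pinned down above, as one hedged snippet: the first record fixes
# the column set and later records are padded with None.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#     assert ds[1] == {"col_1": None}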
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = [0 for i in range(r + 1 )]
# nc0 = 1
__snake_case : Optional[Any] = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
__snake_case : Optional[Any] = min(_lowerCamelCase , _lowerCamelCase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
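# Hedged sanity check: the Pascal's-triangle row update above computes
# C(10, 5) = 252, matching math.comb from the standard library (Python 3.8+).
#
#     import math
#     assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252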
| 26 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNorm1d(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def _a ( _lowerCamelCase ) -> tuple[np.ndarray, np.ndarray]:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = np.shape(_lowerCamelCase )
if rows != columns:
__snake_case : int = (
"""'table' has to be of square shaped array but got a """
F'''{rows}x{columns} array:\n{table}'''
)
raise ValueError(_lowerCamelCase )
__snake_case : Any = np.zeros((rows, columns) )
__snake_case : List[Any] = np.zeros((rows, columns) )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
__snake_case : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(_lowerCamelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
__snake_case : Optional[Any] = (table[i][j] - total) / upper[j][j]
__snake_case : List[str] = 1
for j in range(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = sum(lower[i][k] * upper[k][j] for k in range(_lowerCamelCase ) )
__snake_case : Optional[int] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
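# Worked 2x2 example (hedged; assuming the Doolittle routine above is named
# lower_upper_decomposition as in its doctests):
#
#     lower_upper_decomposition(np.array([[4.0, 3.0], [6.0, 3.0]]))
#     # lower = [[1.0, 0.0], [1.5, 1.0]], upper = [[4.0, 3.0], [0.0, -1.5]]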
| 26 |
'''simple docstring'''
from __future__ import annotations
__UpperCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
__snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the reference grid
__snake_case : Tuple = 1
__snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the action grid
__snake_case : List[str] = init[0]
__snake_case : str = init[1]
__snake_case : int = 0
__snake_case : int = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : List[str] = [[f, g, x, y]]
__snake_case : Any = False # flag that is set when search is complete
__snake_case : int = False # flag set if we cannot expand any further
while not found and not resign:
if len(_lowerCamelCase ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : Tuple = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : List[Any] = next_cell[3]
__snake_case : int = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Optional[Any] = True
else:
for i in range(len(_lowerCamelCase ) ): # to try out different valid actions
__snake_case : Union[str, Any] = x + DIRECTIONS[i][0]
__snake_case : str = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : str = g + cost
__snake_case : Tuple = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : List[str] = 1
__snake_case : Optional[int] = i
__snake_case : List[str] = []
__snake_case : Optional[int] = goal[0]
__snake_case : List[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Dict = x - DIRECTIONS[action[x][y]][0]
__snake_case : int = y - DIRECTIONS[action[x][y]][1]
__snake_case : Optional[int] = xa
__snake_case : int = ya
invpath.append([x, y] )
__snake_case : Optional[int] = []
for i in range(len(_lowerCamelCase ) ):
path.append(invpath[len(_lowerCamelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__UpperCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCamelCase = [0, 0]
# all coordinates are given in format [y,x]
__UpperCamelCase = [len(grid) - 1, len(grid[0]) - 1]
__UpperCamelCase = 1
# the cost map which pushes the path closer to the goal
__UpperCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCamelCase = 99
__UpperCamelCase , __UpperCamelCase = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase = 1000 ) -> int:
"""simple docstring"""
__snake_case : List[str] = -1
__snake_case : List[str] = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__snake_case : List[Any] = (n * n - 2 * a * n) // (2 * n - 2 * a)
__snake_case : Any = n - a - b
if c * c == (a * a + b * b):
__snake_case : Any = a * b * c
if candidate >= product:
__snake_case : Union[str, Any] = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""only integers accepted as input""" )
else:
__snake_case : List[Any] = str(abs(_lowerCamelCase ) )
__snake_case : Union[str, Any] = [list(_lowerCamelCase ) for char in range(len(_lowerCamelCase ) )]
for index in range(len(_lowerCamelCase ) ):
num_transpositions[index].pop(_lowerCamelCase )
return max(
int("""""".join(list(_lowerCamelCase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 1 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase = logging.get_logger(__name__)
class _A ( enum.Enum ):
lowercase__: List[str] = 0
lowercase__: List[Any] = 1
@add_end_docstrings(__lowercase )
class _A ( __lowercase ):
lowercase__: str = '''generated'''
def __init__( self : Any , *__magic_name__ : Optional[int] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*__magic_name__ , **__magic_name__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowercase__ ( self : List[str] , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , __magic_name__ : int=None , __magic_name__ : Any=None , **__magic_name__ : str , ) -> Any:
"""simple docstring"""
__snake_case : Any = {}
if truncation is not None:
__snake_case : List[str] = truncation
__snake_case : Optional[Any] = generate_kwargs
__snake_case : Dict = {}
if return_tensors is not None and return_type is None:
__snake_case : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__snake_case : Any = return_type
if clean_up_tokenization_spaces is not None:
__snake_case : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
__snake_case : Tuple = self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
if len(__magic_name__ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
__snake_case : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase__ ( self : List[Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> List[Any]:
"""simple docstring"""
return True
def lowercase__ ( self : List[Any] , *__magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __magic_name__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
__snake_case : List[str] = ([prefix + arg for arg in args[0]],)
__snake_case : Dict = True
elif isinstance(args[0] , __magic_name__ ):
__snake_case : List[str] = (prefix + args[0],)
__snake_case : Union[str, Any] = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''' )
__snake_case : Dict = self.tokenizer(*__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Any , *__magic_name__ : Optional[int] , **__magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = super().__call__(*__magic_name__ , **__magic_name__ )
if (
isinstance(args[0] , __magic_name__ )
and all(isinstance(__magic_name__ , __magic_name__ ) for el in args[0] )
and all(len(__magic_name__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : str=TruncationStrategy.DO_NOT_TRUNCATE , **__magic_name__ : str ) -> str:
"""simple docstring"""
__snake_case : Dict = self._parse_and_tokenize(__magic_name__ , truncation=__magic_name__ , **__magic_name__ )
return inputs
def lowercase__ ( self : Optional[Any] , __magic_name__ : Any , **__magic_name__ : List[str] ) -> Any:
"""simple docstring"""
if self.framework == "pt":
__snake_case , __snake_case : str = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
__snake_case , __snake_case : Optional[int] = tf.shape(model_inputs["""input_ids"""] ).numpy()
__snake_case : List[str] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
__snake_case : Optional[Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__magic_name__ , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
__snake_case : Union[str, Any] = self.model.generate(**__magic_name__ , **__magic_name__ )
__snake_case : Optional[int] = output_ids.shape[0]
if self.framework == "pt":
__snake_case : Union[str, Any] = output_ids.reshape(__magic_name__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__snake_case : Tuple = tf.reshape(__magic_name__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Any=ReturnType.TEXT , __magic_name__ : Dict=False ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__snake_case : Optional[int] = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
__snake_case : Optional[int] = {
f'''{self.return_name}_text''': self.tokenizer.decode(
__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
}
records.append(__magic_name__ )
return records
@add_end_docstrings(__lowercase )
class _A ( __lowercase ):
lowercase__: Optional[int] = '''summary'''
def __call__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : str ) -> Any:
"""simple docstring"""
return super().__call__(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(__lowercase )
class _A ( __lowercase ):
lowercase__: Union[str, Any] = '''translation'''
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> str:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def lowercase__ ( self : Optional[Any] , *__magic_name__ : Dict , __magic_name__ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , __magic_name__ : Any=None , __magic_name__ : List[Any]=None ) -> Tuple:
"""simple docstring"""
if getattr(self.tokenizer , """_build_translation_inputs""" , __magic_name__ ):
return self.tokenizer._build_translation_inputs(
*__magic_name__ , return_tensors=self.framework , truncation=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
else:
return super()._parse_and_tokenize(*__magic_name__ , truncation=__magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : Any=None , __magic_name__ : Union[str, Any]=None , **__magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case : List[Any] = super()._sanitize_parameters(**__magic_name__ )
if src_lang is not None:
__snake_case : int = src_lang
if tgt_lang is not None:
__snake_case : Any = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__snake_case : Tuple = kwargs.get("""task""" , self.task )
__snake_case : Union[str, Any] = task.split("""_""" )
if task and len(__magic_name__ ) == 4:
# translation, XX, to YY
__snake_case : List[Any] = items[1]
__snake_case : str = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , *__magic_name__ : int , **__magic_name__ : Union[str, Any] ) -> int:
"""simple docstring"""
return super().__call__(*__magic_name__ , **__magic_name__ )
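# Example usage -- a minimal sketch, assuming the standard `transformers.pipeline`
# factory dispatches to the summarization and translation pipelines above (shown
# as a doctest-style comment because it downloads model weights):
#
# >>> from transformers import pipeline
# >>> summarizer = pipeline("summarization")
# >>> summarizer("A very long article ...", min_length=5, max_length=40)
# >>> translator = pipeline("translation_en_to_fr")
# >>> translator("How old are you?")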
| 26 |
'''simple docstring'''
from __future__ import annotations
import math
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
)
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__snake_case : Optional[int] = int(math.log(len(_lowerCamelCase ) , 2 ))
print(F'''Optimal value : {minimax(0 , 0 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
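# Worked example -- a sketch, assuming the recursive function above is bound to
# the `minimax` name it calls. For the eight leaf scores used in main(), the
# tree height is log2(8) = 3 and the evaluation runs bottom-up:
#
# depth 3 (leaves): 90 23 | 6 33 | 21 65 | 123 34423
# depth 2 (max)   :   90  |  33  |  65   |   34423
# depth 1 (min)   :      33      |       65
# depth 0 (max)   :              65   <- optimal value printed by main()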
| 26 | 1 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _a ( ) -> Dict:
"""simple docstring"""
__snake_case : int = HfArgumentParser(_lowerCamelCase )
__snake_case : int = parser.parse_args_into_dataclasses()[0]
__snake_case : Optional[int] = TensorFlowBenchmark(args=_lowerCamelCase )
try:
__snake_case : Dict = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__snake_case : int = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
__snake_case : Dict = """ """.join(str(_lowerCamelCase ).split(""" """ )[:-1] )
__snake_case : List[Any] = """"""
__snake_case : Any = eval(str(_lowerCamelCase ).split(""" """ )[-1] )
__snake_case : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__snake_case : List[str] = full_error_msg + begin_error_msg + str(_lowerCamelCase )
raise ValueError(_lowerCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
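# Example invocation -- a sketch; the flags are generated by HfArgumentParser
# from the TensorFlow benchmark arguments (model name and values illustrative):
#
# python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128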
| 26 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> None:
"""simple docstring"""
if start is None:
__snake_case : Optional[Any] = 0
if end is None:
__snake_case : Optional[Any] = len(_lowerCamelCase ) - 1
if start >= end:
return
__snake_case : Tuple = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
__snake_case , __snake_case : str = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
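# Example usage -- a sketch, assuming the in-place sort above is exposed under
# the `slowsort` name it calls recursively:
#
# >>> seq = [7, 3, 9, 1, 4]
# >>> slowsort(seq)
# >>> seq
# [1, 3, 4, 7, 9]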
| 26 | 1 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
__UpperCamelCase = datasets.logging.get_logger(__name__)
__UpperCamelCase = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
__UpperCamelCase = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
__UpperCamelCase = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def lowercase__ ( self : int , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if self.config_name == "default":
__snake_case : Tuple = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
__snake_case : Any = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : int=None , __magic_name__ : str=False ) -> List[str]:
"""simple docstring"""
if gpus is None:
__snake_case : List[str] = 1 if torch.cuda.is_available() else 0
__snake_case : int = {"""src""": sources, """mt""": predictions, """ref""": references}
__snake_case : Optional[Any] = [dict(zip(__magic_name__ , __magic_name__ ) ) for t in zip(*data.values() )]
__snake_case , __snake_case : List[Any] = self.scorer.predict(__magic_name__ , gpus=__magic_name__ , progress_bar=__magic_name__ )
return {"mean_score": mean_score, "scores": scores}
| 26 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
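# A minimal sketch of the doctest machinery the tests above drive (`some_module`
# is an illustrative placeholder, not a real transformers module):
#
# >>> import doctest, unittest
# >>> suite = doctest.DocTestSuite(some_module)      # collect >>> examples from docstrings
# >>> result = unittest.TextTestRunner().run(suite)  # run them as a unittest suite
# >>> len(result.failures)
# 0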
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> bool:
"""simple docstring"""
__snake_case : str = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def _a ( _lowerCamelCase = 5000 ) -> int:
"""simple docstring"""
__snake_case : str = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )]
for i, pentagonal_i in enumerate(_lowerCamelCase ):
for j in range(_lowerCamelCase , len(_lowerCamelCase ) ):
__snake_case : int = pentagonal_nums[j]
__snake_case : Tuple = pentagonal_i + pentagonal_j
__snake_case : str = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ):
return b
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
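# 0.18215 is the Stable Diffusion VAE scaling factor: latents were scaled
# down by it during training, so divide it back out before decoding.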
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
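# Example usage -- a minimal sketch, assuming the class above is exported as
# `SpeechToImagePipeline`; the checkpoint id and audio file are illustrative
# (the second positional argument of __call__ is the audio sampling rate):
#
# >>> import soundfile as sf
# >>> pipe = SpeechToImagePipeline.from_pretrained("some/speech-to-image-checkpoint")
# >>> audio, sampling_rate = sf.read("speech.wav")
# >>> image = pipe(audio, sampling_rate, num_inference_steps=50).images[0]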
| 26 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Optional[Any] = CTRLTokenizer
lowercase__: Dict = False
lowercase__: List[Any] = False
def lowercase__ ( self : List[str] ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : Any = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
__snake_case : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
__snake_case : Optional[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
__snake_case : int = {"""unk_token""": """<unk>"""}
__snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def lowercase__ ( self : Union[str, Any] , **__magic_name__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowercase__ ( self : Tuple , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = """adapt react readapt apt"""
__snake_case : Tuple = """adapt react readapt apt"""
return input_text, output_text
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case : str = """adapt react readapt apt"""
__snake_case : List[str] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
__snake_case : List[str] = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = tokens + [tokenizer.unk_token]
__snake_case : Union[str, Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
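# How the expected tokens arise -- a sketch of the BPE merges from setUp:
# "react" -> r e a c t</w>; the "r e" merge yields ["re", "a", "c", "t"],
# rendered with continuation markers as "re@@ a@@ c@@ t". "readapt" also
# applies "a p", "ap t</w>", "a d" and "ad apt</w>", collapsing to
# ["re", "adapt"], i.e. "re@@ adapt"; "adapt" and "apt" merge all the way
# down to single vocabulary entries.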
| 26 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
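# A minimal sketch of what _LazyModule buys: importing the package is cheap,
# and each submodule is only imported on first attribute access.
#
# >>> import transformers.onnx            # fast, nothing heavy imported yet
# >>> transformers.onnx.FeaturesManager   # first access triggers the real import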
| 26 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
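# Example invocation -- a sketch (script and checkpoint paths illustrative; the
# .ckpt must be a TensorFlow MobileNetV1 checkpoint matching the model name):
#
# python convert_mobilenet.py --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path mobilenet_v1_1.0_224.ckpt \
#     --pytorch_dump_folder_path ./mobilenet_v1_1.0_224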
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["CLIPFeatureExtractor"]
__UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__snake_case : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Tuple:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
__snake_case : List[str] = """"""
else:
__snake_case : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : List[str] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
__snake_case : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Dict = in_proj_weight[
: config.hidden_size, :
]
__snake_case : str = in_proj_bias[: config.hidden_size]
__snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
__snake_case : str = in_proj_bias[-config.hidden_size :]
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Dict = dct.pop(_lowerCamelCase )
__snake_case : List[Any] = val
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : Optional[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
__snake_case : List[Any] = 8
# set labels if required
if not base_model:
__snake_case : Union[str, Any] = 1000
__snake_case : str = """huggingface/label-files"""
__snake_case : List[str] = """imagenet-1k-id2label.json"""
__snake_case : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case : Optional[int] = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__snake_case : List[str] = 384
__snake_case : Optional[Any] = 1536
__snake_case : Optional[int] = 12
__snake_case : Optional[Any] = 6
# load original model from torch hub
__snake_case : List[str] = torch.hub.load("""facebookresearch/dino:main""" , _lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__snake_case : str = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
__snake_case : str = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if base_model:
__snake_case : Union[str, Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval()
else:
__snake_case : Optional[int] = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
__snake_case : List[str] = ViTImageProcessor()
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Any = encoding["""pixel_values"""]
__snake_case : Optional[int] = model(_lowerCamelCase )
if base_model:
__snake_case : List[Any] = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
__snake_case : Dict = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1E-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
__UpperCamelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
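# Example invocation -- a sketch (script name and folder illustrative; valid
# model names are the facebookresearch/dino torch.hub entries, e.g.
# dino_vits8, dino_vits16, dino_vitb8, dino_vitb16):
#
# python convert_dino.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16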
| 26 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
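# Reference formula for the binary case of the statistic computed above:
#
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN))
#
# giving +1 for perfect prediction, 0 for chance level and -1 for total
# disagreement, as the module docstring notes.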
| 26 | 1 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "x" , _lowerCamelCase = 10**-10 , _lowerCamelCase = 1 , ) -> complex:
"""simple docstring"""
__snake_case : Union[str, Any] = symbols(_lowerCamelCase )
__snake_case : Union[str, Any] = lambdify(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
__snake_case : List[str] = starting_point
while True:
if diff_function(_lowerCamelCase ) != 0:
__snake_case : Optional[int] = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
_lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__snake_case : str = next_guess
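# The update above is the modified Newton-Raphson step for a root of
# multiplicity m:
#
# x_{n+1} = x_n - m * f(x_n) / f'(x_n)
#
# with m = 1 (the `multiplicity` default) recovering the classic iteration.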
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 26 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 26 | 1 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
debug_launcher(test_script.main )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
debug_launcher(test_ops.main )
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase = 100 ) -> int:
"""simple docstring"""
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = tempfile.mkdtemp()
__snake_case : List[str] = BlipImageProcessor()
__snake_case : Tuple = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__snake_case : str = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
__snake_case : Dict = InstructBlipProcessor(__magic_name__ , __magic_name__ , __magic_name__ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[Any] , **__magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).tokenizer
def lowercase__ ( self : Optional[int] , **__magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).image_processor
def lowercase__ ( self : int , **__magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ).qformer_tokenizer
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__snake_case : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__snake_case : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__snake_case : Optional[Any] = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
__snake_case : Optional[Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
self.assertIsInstance(processor.qformer_tokenizer , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = self.get_image_processor()
__snake_case : str = self.get_tokenizer()
__snake_case : str = self.get_qformer_tokenizer()
__snake_case : List[str] = InstructBlipProcessor(
tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ )
__snake_case : List[Any] = self.prepare_image_inputs()
__snake_case : Dict = image_processor(__magic_name__ , return_tensors="""np""" )
__snake_case : Any = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : str = self.get_qformer_tokenizer()
__snake_case : str = InstructBlipProcessor(
tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ )
__snake_case : List[Any] = """lower newer"""
__snake_case : List[str] = processor(text=__magic_name__ )
__snake_case : Union[str, Any] = tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
__snake_case : Tuple = qformer_tokenizer(__magic_name__ , return_token_type_ids=__magic_name__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Optional[int] = self.get_qformer_tokenizer()
__snake_case : Tuple = InstructBlipProcessor(
tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ )
__snake_case : Union[str, Any] = """lower newer"""
__snake_case : List[str] = self.prepare_image_inputs()
__snake_case : int = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_qformer_tokenizer()
__snake_case : Dict = InstructBlipProcessor(
tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ )
__snake_case : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : int = processor.batch_decode(__magic_name__ )
__snake_case : str = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : Any = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : str = self.get_qformer_tokenizer()
__snake_case : Dict = InstructBlipProcessor(
tokenizer=__magic_name__ , image_processor=__magic_name__ , qformer_tokenizer=__magic_name__ )
__snake_case : Optional[int] = """lower newer"""
__snake_case : List[Any] = self.prepare_image_inputs()
__snake_case : Optional[Any] = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = row, column
__snake_case : Dict = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
__snake_case : Dict = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Determine the widest element and build a fixed-width format string from it
__snake_case : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__snake_case : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
__snake_case : str = f'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ : list[float] ) -> str:
nonlocal string_format_identifier
__snake_case : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self )
def lowercase__ ( self : Dict , __magic_name__ : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , __magic_name__ : tuple[int, int] ) -> Any:
"""simple docstring"""
        assert self.validate_indices(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None:
"""simple docstring"""
        assert self.validate_indices(__magic_name__ )
__snake_case : Optional[int] = value
def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
__snake_case : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) -> Matrix:
"""simple docstring"""
__snake_case : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = -self[r, c]
return result
def __sub__( self : Optional[int] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : List[Any] , __magic_name__ : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
__snake_case : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : Tuple = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
__snake_case : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__snake_case : Optional[int] = f'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
__snake_case : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : str = self[r, c]
return result
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
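        # Sherman-Morrison formula, with self playing the role of A^(-1):
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)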
__snake_case : List[str] = v.transpose()
__snake_case : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
__snake_case : Any = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__snake_case : Dict = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Union[str, Any] = 1, 2, -3
__snake_case : str = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}''' )
def _a ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 26 | 1 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__UpperCamelCase = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _A ( __lowercase ):
def __init__( self : Tuple , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=1 ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = tokenizer
__snake_case : Dict = dataset
__snake_case : Optional[Any] = len(__magic_name__ ) if n_tasks is None else n_tasks
__snake_case : List[Any] = n_copies
def __iter__( self : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
__snake_case : Optional[int] = self.tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _A ( __lowercase ):
def __init__( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
__snake_case : int = start_length
__snake_case : Union[str, Any] = eof_strings
__snake_case : Dict = tokenizer
def __call__( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any] , **__magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__snake_case : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__magic_name__ )
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = re.split("""(%s)""" % """|""".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
__snake_case : int = batch["""ids"""].shape[-1]
__snake_case : Dict = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["""ids"""][:, : batch["""input_len"""]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
__snake_case : int = batch["""task_id"""].repeat(_lowerCamelCase )
__snake_case : Tuple = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
__snake_case , __snake_case : Any = accelerator.gather((generated_tokens, generated_tasks) )
__snake_case : List[str] = generated_tokens.cpu().numpy()
__snake_case : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
__snake_case : List[str] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__snake_case : int = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : int = HfArgumentParser(_lowerCamelCase )
__snake_case : Dict = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__snake_case : Optional[int] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__snake_case : int = """false"""
if args.num_workers is None:
__snake_case : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__snake_case : str = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
__snake_case : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
__snake_case : Dict = tokenizer.eos_token
__snake_case : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__snake_case : Optional[int] = {
"""do_sample""": args.do_sample,
"""temperature""": args.temperature,
"""max_new_tokens""": args.max_new_tokens,
"""top_p""": args.top_p,
"""top_k""": args.top_k,
"""stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
__snake_case : Dict = load_dataset("""openai_humaneval""" )
__snake_case : int = load_metric("""code_eval""" )
__snake_case : int = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
__snake_case : str = args.n_samples // args.batch_size
__snake_case : List[str] = TokenizedDataset(_lowerCamelCase , human_eval["""test"""] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
__snake_case : Dict = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__snake_case : int = code_eval_metric.compute(references=[""""""] , predictions=[[""""""]] )
except ValueError as exception:
print(
"""Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
""" flag to enable code evaluation.""" )
raise exception
__snake_case , __snake_case : List[str] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
__snake_case : Optional[int] = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
__snake_case : str = []
for task in tqdm(range(_lowerCamelCase ) ):
__snake_case : Union[str, Any] = human_eval["""test"""][task]["""test"""]
__snake_case : List[str] = F'''check({human_eval["test"][task]["entry_point"]})'''
references.append("""\n""" + test_func + """\n""" + entry_point )
# Evaluate completions with "code_eval" metric
__snake_case , __snake_case : Optional[int] = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , """w""" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__UpperCamelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : int = {}
with open(_lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(_lowerCamelCase ):
__snake_case : Union[str, Any] = line.strip()
if line:
__snake_case : int = line.split()
__snake_case : Tuple = line_number
__snake_case : Optional[Any] = words[0]
__snake_case : str = value
return result
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
for attribute in key.split(""".""" ):
__snake_case : List[str] = getattr(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
__snake_case : Optional[int] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__snake_case : List[str] = """param"""
if weight_type is not None and weight_type != "param":
__snake_case : str = getattr(_lowerCamelCase , _lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : List[Any] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__snake_case : List[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
__snake_case : str = shape_pointer.shape
# let's reduce dimension
__snake_case : Union[str, Any] = value[0]
else:
__snake_case : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__snake_case : Any = value
elif weight_type == "weight_g":
__snake_case : Any = value
elif weight_type == "weight_v":
__snake_case : Dict = value
elif weight_type == "bias":
__snake_case : Optional[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__snake_case : Any = getattr(_lowerCamelCase , _lowerCamelCase )
__snake_case : Union[str, Any] = value
else:
__snake_case : Optional[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
__snake_case : Union[str, Any] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__snake_case : Union[str, Any] = """param"""
if weight_type is not None and weight_type != "param":
__snake_case : List[Any] = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : List[str] = """.""".join([key, hf_param_name] )
else:
__snake_case : List[Any] = key
__snake_case : Union[str, Any] = value if """lm_head""" in full_key else value[0]
__UpperCamelCase = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[int] = False
for key, mapped_key in MAPPING.items():
__snake_case : Union[str, Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__snake_case : Dict = True
if "*" in mapped_key:
__snake_case : Optional[int] = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
__snake_case : List[str] = mapped_key.replace("""*""" , _lowerCamelCase )
if "weight_g" in name:
__snake_case : Optional[int] = """weight_g"""
elif "weight_v" in name:
__snake_case : Optional[int] = """weight_v"""
elif "bias" in name:
__snake_case : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : Any = """weight"""
else:
__snake_case : Optional[int] = None
if hf_dict is not None:
rename_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return is_used
return is_used
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Tuple = fairseq_model.state_dict()
__snake_case : List[str] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__snake_case : List[Any] = True
else:
__snake_case : Optional[Any] = load_wavaveca_layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
__snake_case : Tuple = name.split(""".""" )
__snake_case : Dict = int(items[0] )
__snake_case : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__snake_case : List[Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__snake_case : Dict = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__snake_case : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__snake_case : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=False ) -> int:
"""simple docstring"""
if config_path is not None:
__snake_case : Dict = WavaVecaConfig.from_pretrained(_lowerCamelCase )
else:
__snake_case : Dict = WavaVecaConfig()
if is_seq_class:
__snake_case : str = read_txt_into_dict(_lowerCamelCase )
__snake_case : List[Any] = idalabel
__snake_case : Any = WavaVecaForSequenceClassification(_lowerCamelCase )
__snake_case : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
feature_extractor.save_pretrained(_lowerCamelCase )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Union[str, Any] = target_dict.pad_index
__snake_case : Union[str, Any] = target_dict.bos_index
__snake_case : Optional[Any] = target_dict.eos_index
__snake_case : int = len(target_dict.symbols )
__snake_case : Dict = os.path.join(_lowerCamelCase , """vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
__snake_case : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : List[Any] = 0
__snake_case : Any = 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
__snake_case : Dict = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , )
__snake_case : Union[str, Any] = True if config.feat_extract_norm == """layer""" else False
__snake_case : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
__snake_case : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
__snake_case : int = WavaVecaForCTC(_lowerCamelCase )
else:
__snake_case : Optional[int] = WavaVecaForPreTraining(_lowerCamelCase )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__snake_case : Optional[int] = argparse.Namespace(task="""audio_pretraining""" )
__snake_case : Dict = fairseq.tasks.setup_task(_lowerCamelCase )
__snake_case , __snake_case , __snake_case : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
__snake_case : Any = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 26 |
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
__snake_case : List[Any] = 0.04
__snake_case : Tuple = self.window_size // 2
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
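                # Harris corner response: R = det(M) - k * trace(M)^2, where
                # M = [[wxx, wxy], [wxy, wyy]] is the windowed second-moment matrix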
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
                # Threshold on the corner response; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase , _lowerCamelCase = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
__snake_case : List[Any] = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
__snake_case : str = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
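    # Deterministic witness set: testing these prime bases is sufficient
    # for n below the corresponding bound in the list above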
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
__snake_case : Tuple = primes[:idx]
break
__snake_case , __snake_case : Union[str, Any] = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
__snake_case : Tuple = False
for r in range(_lowerCamelCase ):
__snake_case : Tuple = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
            # see the article's analysis for an explanation of m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
__snake_case : List[str] = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
def _a ( ) -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 26 | 1 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__UpperCamelCase = logging.getLogger(__name__)
class _A ( __lowercase ):
def lowercase__ ( self : int , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[Any]=None ) -> str:
"""simple docstring"""
__snake_case : List[Any] = self.layer[current_layer](__magic_name__ , __magic_name__ , head_mask[current_layer] )
__snake_case : Any = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , __lowercase , )
class _A ( __lowercase ):
def __init__( self : Optional[Any] , __magic_name__ : str ) -> Dict:
"""simple docstring"""
super().__init__(__magic_name__ )
__snake_case : Optional[Any] = BertEncoderWithPabee(__magic_name__ )
self.init_weights()
__snake_case : Union[str, Any] = 0
__snake_case : Union[str, Any] = 0
__snake_case : Optional[int] = 0
__snake_case : List[str] = 0
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = threshold
def lowercase__ ( self : List[str] , __magic_name__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = patience
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = 0
__snake_case : Dict = 0
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : Any = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(__magic_name__ )
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : List[str]=None , __magic_name__ : List[Any]=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : int=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[int]=False , ) -> str:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
__snake_case : Tuple = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : int = torch.ones(__magic_name__ , device=__magic_name__ )
if token_type_ids is None:
__snake_case : int = torch.zeros(__magic_name__ , dtype=torch.long , device=__magic_name__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(__magic_name__ , __magic_name__ , __magic_name__ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : str = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : List[Any] = torch.ones(__magic_name__ , device=__magic_name__ )
__snake_case : Optional[Any] = self.invert_attention_mask(__magic_name__ )
else:
__snake_case : Any = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : List[Any] = self.get_head_mask(__magic_name__ , self.config.num_hidden_layers )
__snake_case : Optional[Any] = self.embeddings(
input_ids=__magic_name__ , position_ids=__magic_name__ , token_type_ids=__magic_name__ , inputs_embeds=__magic_name__ )
__snake_case : Optional[int] = embedding_output
if self.training:
__snake_case : Any = []
for i in range(self.config.num_hidden_layers ):
__snake_case : Any = self.encoder.adaptive_forward(
__magic_name__ , current_layer=__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ )
__snake_case : int = self.pooler(__magic_name__ )
__snake_case : Tuple = output_layers[i](output_dropout(__magic_name__ ) )
res.append(__magic_name__ )
elif self.patience == 0: # Use all layers for inference
__snake_case : int = self.encoder(
__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
__snake_case : Tuple = self.pooler(encoder_outputs[0] )
__snake_case : Optional[int] = [output_layers[self.config.num_hidden_layers - 1](__magic_name__ )]
else:
__snake_case : List[str] = 0
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
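            # PABEE early exit: run layers one at a time and stop as soon as
            # `patience` consecutive internal classifiers agree on the prediction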
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
__magic_name__ , current_layer=__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ )
__snake_case : Union[str, Any] = self.pooler(__magic_name__ )
__snake_case : str = output_layers[i](__magic_name__ )
if regression:
__snake_case : str = logits.detach()
if patient_result is not None:
__snake_case : Any = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : int = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : Any = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(__magic_name__ ) ):
patient_counter += 1
else:
__snake_case : Tuple = 0
__snake_case : Optional[int] = logits
if patient_counter == self.patience:
break
__snake_case : Tuple = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , __lowercase , )
class _A ( __lowercase ):
def __init__( self : Union[str, Any] , __magic_name__ : str ) -> Any:
"""simple docstring"""
super().__init__(__magic_name__ )
__snake_case : List[str] = config.num_labels
__snake_case : Union[str, Any] = BertModelWithPabee(__magic_name__ )
__snake_case : str = nn.Dropout(config.hidden_dropout_prob )
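# One linear classification head per encoder layer, so a prediction can be read off at any depth.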
__snake_case : Union[str, Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowercase__ ( self : List[str] , __magic_name__ : Dict=None , __magic_name__ : str=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : str=None , __magic_name__ : Dict=None , __magic_name__ : str=None , __magic_name__ : Tuple=None , ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = self.bert(
input_ids=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : Optional[int] = (logits[-1],)
if labels is not None:
__snake_case : List[str] = None
__snake_case : Optional[Any] = 0
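# Per-layer losses are combined with linearly increasing weights: the classifier at
# layer i contributes its loss scaled by (i + 1), so deeper heads dominate the objective.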
for ix, logits_item in enumerate(__magic_name__ ):
if self.num_labels == 1:
# We are doing regression
__snake_case : Optional[int] = MSELoss()
__snake_case : List[Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : Union[str, Any] = CrossEntropyLoss()
__snake_case : List[Any] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : List[Any] = (total_loss / total_weights,) + outputs
return outputs
| 26 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
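# The first constant selects the answer generator ("bart" vs. the T5 fallback below);
# the second toggles loading the dense FAISS retrieval index, which expects a GPU and
# the memory-mapped passage embeddings on disk.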
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
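# st.cache cannot hash torch tensors or the BART tokenizer, so the decorator below maps
# both types to a constant, effectively excluding them from the cache key.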
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
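# "mixed" retrieval: merge the dense and sparse hits, de-duplicate them, and keep the top 10.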
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {"vocab_file": "spiece.model"}
__UpperCamelCase = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
__UpperCamelCase = "▁"
class _A ( __lowercase ):
lowercase__: Tuple = VOCAB_FILES_NAMES
lowercase__: List[str] = PRETRAINED_VOCAB_FILES_MAP
lowercase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__: Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int]="</s>" , __magic_name__ : Optional[int]="<unk>" , __magic_name__ : Any="<pad>" , __magic_name__ : Any=1_00 , __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[Dict[str, Any]] = None , __magic_name__ : List[Any]=True , **__magic_name__ : str , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
__snake_case : List[str] = [f'''<extra_id_{i}>''' for i in range(__magic_name__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__snake_case : Tuple = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
__snake_case : str = legacy
__snake_case : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , legacy=__magic_name__ , **__magic_name__ , )
__snake_case : Dict = vocab_file
__snake_case : List[Any] = extra_ids
__snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
@staticmethod
def lowercase__ ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> Dict:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__snake_case : Optional[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __magic_name__ , )
return max_model_length
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__magic_name__ )) + [1]
return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
return list(
set(filter(lambda __magic_name__ : bool(re.search(r"""<extra_id_\d+>""" , __magic_name__ ) ) is not None , self.additional_special_tokens ) ) )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return [self._convert_token_to_id(__magic_name__ ) for token in self.get_sentinel_tokens()]
def lowercase__ ( self : str , __magic_name__ : List[int] ) -> List[int]:
"""simple docstring"""
if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowercase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__snake_case : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__snake_case : Dict = self._add_eos_if_not_present(__magic_name__ )
if token_ids_a is None:
return token_ids_a
else:
__snake_case : int = self._add_eos_if_not_present(__magic_name__ )
return token_ids_a + token_ids_a
def __getstate__( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : Dict = None
return state
def __setstate__( self : int , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : int = {}
__snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self : List[str] , __magic_name__ : "TextInput" , **__magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
if not self.legacy:
__snake_case : Dict = SPIECE_UNDERLINE + text.replace(__magic_name__ , """ """ )
return super().tokenize(__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : Union[str, Any] , **__magic_name__ : int ) -> Dict:
"""simple docstring"""
if not self.legacy:
__snake_case : Dict = text.startswith(__magic_name__ )
if is_first:
__snake_case : Union[str, Any] = text[1:]
__snake_case : List[str] = self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(__magic_name__ ):
__snake_case : Any = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowercase__ ( self : Dict , __magic_name__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if token.startswith("""<extra_id_""" ):
__snake_case : List[str] = re.match(r"""<extra_id_(\d+)>""" , __magic_name__ )
__snake_case : str = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__magic_name__ )
def lowercase__ ( self : Any , __magic_name__ : List[Any] ) -> Any:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
__snake_case : List[str] = self.sp_model.IdToPiece(__magic_name__ )
else:
__snake_case : Any = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def lowercase__ ( self : Any , __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = []
__snake_case : List[Any] = """"""
__snake_case : str = False
for token in tokens:
# make sure that special tokens are not decoded using the SentencePiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = []
else:
current_sub_tokens.append(__magic_name__ )
__snake_case : Optional[Any] = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case : Any = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , """wb""" ) as fi:
__snake_case : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
| 26 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 1 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNormad(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check that adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check that adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so they are on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so they are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so they are on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so they are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so they are on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so they are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
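# Each (parlai_name, hf_name) pair rewrites a ParlAI parameter-name fragment into its
# Hugging Face equivalent during state-dict key renaming.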
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( __lowercase , unittest.TestCase ):
lowercase__: List[Any] = None
lowercase__: Optional[int] = BloomTokenizerFast
lowercase__: Union[str, Any] = BloomTokenizerFast
lowercase__: int = True
lowercase__: Tuple = False
lowercase__: Union[str, Any] = '''tokenizer_file'''
lowercase__: List[str] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
super().setUp()
__snake_case : List[Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Tuple , **__magic_name__ : List[Any] ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
__snake_case : Tuple = self.get_rust_tokenizer()
__snake_case : Optional[int] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__snake_case : Any = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__snake_case : Optional[int] = tokenizer.batch_encode_plus(__magic_name__ )["""input_ids"""]
self.assertListEqual(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Optional[int] , __magic_name__ : str=6 ) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__snake_case : List[Any] = """This is a simple input"""
__snake_case : List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case : int = ("""This is a simple input""", """This is a pair""")
__snake_case : List[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(__magic_name__ , max_length=__magic_name__ )
tokenizer_r.encode_plus(__magic_name__ , max_length=__magic_name__ )
tokenizer_r.batch_encode_plus(__magic_name__ , max_length=__magic_name__ )
tokenizer_r.encode(__magic_name__ , max_length=__magic_name__ )
tokenizer_r.batch_encode_plus(__magic_name__ , max_length=__magic_name__ )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__snake_case : Optional[int] = None # Hotfixing padding = None
self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Simple input
self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Simple input
self.assertRaises(
__magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )
# Pair input
self.assertRaises(__magic_name__ , tokenizer_r.encode , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Pair input
self.assertRaises(__magic_name__ , tokenizer_r.encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" )
# Pair input
self.assertRaises(
__magic_name__ , tokenizer_r.batch_encode_plus , __magic_name__ , max_length=__magic_name__ , padding="""max_length""" , )
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.get_rust_tokenizer()
__snake_case : List[Any] = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=__magic_name__ )
__snake_case : str = next(iter(__magic_name__ ) )["""premise"""] # pick up one data
__snake_case : Tuple = list(sample_data.values() )
__snake_case : Optional[Any] = list(map(tokenizer.encode , __magic_name__ ) )
__snake_case : Optional[int] = [tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) for x in output_tokens]
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 26 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
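# Each entry pairs a regex that locates the version string in a given file type with
# the template used to rewrite it ("VERSION" is substituted with the target version).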
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Remove folders containing examples that are no longer actively maintained from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user that we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 1 |
import random
from typing import Any
def __lowercase ( snake_case ):
"""simple docstring"""
for _ in range(len(snake_case ) ):
a :Optional[int] = random.randint(0, len(snake_case ) - 1 )
b :Union[str, Any] = random.randint(0, len(snake_case ) - 1 )
data[a] , data[b] = data[b], data[a]
return data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = [0, 1, 2, 3, 4, 5, 6, 7]
SCREAMING_SNAKE_CASE__ : int = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetFromListTest(TestCase):
    def _create_example_records(self):
        """simple docstring"""
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        """simple docstring"""
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        """simple docstring"""
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        """simple docstring"""
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        """simple docstring"""
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        """simple docstring"""
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        """simple docstring"""
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 26 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmv2,
layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longt5,
luke,
lxmert,
m2m_100,
marian,
markuplm,
mask2former,
maskformer,
mbart,
mbart50,
mega,
megatron_bert,
megatron_gpt2,
mgp_str,
mluke,
mobilebert,
mobilenet_v1,
mobilenet_v2,
mobilevit,
mobilevitv2,
mpnet,
mra,
mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_2,
speecht5,
splinter,
squeezebert,
swiftformer,
swin,
swin2sr,
swinv2,
switch_transformers,
t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wav2vec2,
wav2vec2_conformer,
wav2vec2_phoneme,
wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        """simple docstring"""
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        """simple docstring"""
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        """simple docstring"""
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        """simple docstring"""
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        """simple docstring"""
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        """simple docstring"""
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        """simple docstring"""
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        """simple docstring"""
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        """simple docstring"""
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        """simple docstring"""
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        """simple docstring"""
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
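# As a quick illustration of the hook API exercised above, here is a minimal custom hook
# that logs tensor shapes around a module's forward pass. This is a sketch that relies only
# on the `ModelHook.pre_forward`/`post_forward` extension points used in the tests; the
# class name is ours:
class ShapeLoggingHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # assumes the first positional argument is a tensor
        print(f"{module.__class__.__name__} input shape: {args[0].shape}")
        return args, kwargs

    def post_forward(self, module, output):
        print(f"{module.__class__.__name__} output shape: {output.shape}")
        return output


# Usage sketch:
#     model = ModelForTest()
#     add_hook_to_module(model, ShapeLoggingHook())
#     model(torch.randn(2, 3))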
| 26 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the number 1 is in the chain that ends with 1
CHAINS[57] = False  # the number 58 is in the chain that ends with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
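# Worked example of the 5-digit batching in `next_number` (a sketch, not part of the
# original file): for 1234567, number % 100_000 == 34567 and number // 100_000 == 12, so
# next_number(1234567) == DIGITS_SQUARED[34567] + DIGITS_SQUARED[12]
#                      == (9 + 16 + 25 + 36 + 49) + (1 + 4) == 135 + 5 == 140.
# A short chain: 44 -> 32 -> 13 -> 10 -> 1, so 44 belongs to the chain ending in 1 (True),
# while 85 -> 89 -> 145 -> ... loops back to 89, so 85 is marked False.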
| 2 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
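# A quick sanity check of the Manhattan-distance heuristic built above (illustrative only):
# for this 5x6 grid the goal is (4, 5), so heuristic[0][0] = |0 - 4| + |0 - 5| = 9, and every
# obstacle cell (grid value 1) is overwritten with the penalty 99 so the search avoids it.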
| 26 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number
    """
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
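# Worked example for the key renaming above (a sketch using a hypothetical checkpoint key,
# not one taken from an actual PoolFormer checkpoint): with key = "network.1.0.mlp.fc1.weight"
# and offset = 0, replace_key_with_offset(key, 0, "mlp.fc1", "output.conv1") finds
# orig_block_num = 1 and layer_num = 0, and rewrites the key to
# "network.block.1.0.output.conv1.weight".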
@torch.no_grad()
def A_( A : Union[str, Any] , A : List[Any] , A : List[Any]):
UpperCamelCase = PoolFormerConfig()
# set attributes based on model_name
UpperCamelCase = 'huggingface/label-files'
UpperCamelCase = model_name[-3:]
UpperCamelCase = 1000
UpperCamelCase = 'imagenet-1k-id2label.json'
UpperCamelCase = (1, 1000)
# set config attributes
UpperCamelCase = json.load(open(hf_hub_download(A , A , repo_type='dataset') , 'r'))
UpperCamelCase = {int(A): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
UpperCamelCase = [2, 2, 6, 2]
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 4.0
UpperCamelCase = 0.9
elif size == "s24":
UpperCamelCase = [4, 4, 12, 4]
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 4.0
UpperCamelCase = 0.9
elif size == "s36":
UpperCamelCase = [6, 6, 18, 6]
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 4.0
UpperCamelCase = 1E-6
UpperCamelCase = 0.9
elif size == "m36":
UpperCamelCase = [6, 6, 18, 6]
UpperCamelCase = [96, 192, 384, 768]
UpperCamelCase = 4.0
UpperCamelCase = 1E-6
UpperCamelCase = 0.95
elif size == "m48":
UpperCamelCase = [8, 8, 24, 8]
UpperCamelCase = [96, 192, 384, 768]
UpperCamelCase = 4.0
UpperCamelCase = 1E-6
UpperCamelCase = 0.95
else:
raise ValueError(f'''Size {size} not supported''')
# load image processor
UpperCamelCase = PoolFormerImageProcessor(crop_pct=A)
# Prepare image
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A , return_tensors='pt').pixel_values
logger.info(f'''Converting model {model_name}...''')
# load original state dict
UpperCamelCase = torch.load(A , map_location=torch.device('cpu'))
# rename keys
UpperCamelCase = rename_keys(A)
# create HuggingFace model and load state dict
UpperCamelCase = PoolFormerForImageClassification(A)
model.load_state_dict(A)
model.eval()
# Define image processor
UpperCamelCase = PoolFormerImageProcessor(crop_pct=A)
UpperCamelCase = image_processor(images=prepare_img() , return_tensors='pt').pixel_values
# forward pass
UpperCamelCase = model(A)
UpperCamelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
UpperCamelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869])
elif size == "s24":
UpperCamelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045])
elif size == "s36":
UpperCamelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898])
elif size == "m36":
UpperCamelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668])
elif size == "m48":
UpperCamelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423])
else:
raise ValueError(f'''Size {size} not supported''')
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , A , atol=1E-2)
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
Path(A).mkdir(exist_ok=A)
model.save_pretrained(A)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(A)
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 3 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """simple docstring"""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("doctest").testmod()
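# Worked example (illustrative): remove_digit(152) builds ["52", "12", "15"] by dropping one
# digit at a time and returns max(52, 12, 15) == 52; the sign is dropped via abs(), so
# remove_digit(-152) also returns 52.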
| 26 | 0 |
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
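# Worked example of the binary search above (illustrative): for the row [4, 3, 2, -1],
# find_negative_index returns 3 (the index of the first negative entry); for [-1, -2, -3]
# it returns 0, and for [5, 4, 3] it returns 3 (the row length). Because both rows and
# columns are sorted in decreasing order, each subsequent row only needs to be searched
# up to the previous row's bound.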
| 4 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
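# Usage sketch for the pipeline above (assuming a pretrained `unet` and `scheduler` are
# available; the variable names here are illustrative):
#
#     pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#     output, message = pipeline(batch_size=1, num_inference_steps=50)
#     images = output.images  # message == "This is a local test"
#
# The extra "This is a local test" string returned alongside the images exists so tests can
# verify that this local pipeline class, rather than a hub-hosted one, was actually loaded.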
| 5 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """simple docstring"""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
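# Quick demonstration of the in-place sort above (illustrative):
#     data = [5, 3, 1, 4, 2]
#     slowsort(data)
#     assert data == [1, 2, 3, 4, 5]
# Slowsort is a deliberately pessimal "multiply and surrender" algorithm; its three
# recursive calls per invocation make it far slower than any practical sort.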
| 26 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        """simple docstring"""
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """simple docstring"""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """simple docstring"""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        """simple docstring"""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """simple docstring"""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
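# A toy usage sketch of the classifier above (the data and helper name here are
# illustrative, not from the original file): two linearly separable points labeled +1/-1.
def _toy_svc_example() -> None:
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, -1.0])]
    ys = np.asarray([1, -1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    # the dual solution here is lambda = [0.5, 0.5] with offset 0
    print(svc.predict(np.asarray([0.0, 2.0])))  # expected: 1
    print(svc.predict(np.asarray([0.0, -2.0])))  # expected: -1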
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 6 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
| 26 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : str = ShapEPipeline
UpperCAmelCase : Union[str, Any] = ['''prompt''']
UpperCAmelCase : Union[str, Any] = ['''prompt''']
UpperCAmelCase : List[str] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase : Optional[Any] = False
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return 32
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return 32
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : str ):
return 8
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
_A = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_A = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowerCAmelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_A = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_A = ShapERenderer(**_UpperCAmelCase )
return model
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.dummy_prior
_A = self.dummy_text_encoder
_A = self.dummy_tokenizer
_A = self.dummy_renderer
_A = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
_A = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
_A = torch.manual_seed(_UpperCAmelCase )
else:
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_A = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowerCAmelCase_ ( self : int ):
_A = 'cpu'
_A = self.get_dummy_components()
_A = self.pipeline_class(**_UpperCAmelCase )
_A = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
_A = output.images[0]
_A = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_A = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase_ ( self : Tuple ):
_A = torch_device == 'cpu'
_A = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = self.get_dummy_components()
_A = self.pipeline_class(**_UpperCAmelCase )
_A = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = 1
_A = 2
_A = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
_A = batch_size * [inputs[key]]
_A = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Tuple ):
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
_A = ShapEPipeline.from_pretrained('openai/shap-e' )
_A = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
_A = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 7 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
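
# A minimal, self-contained sketch (not part of the pipeline above) of the
# classifier-free-guidance step it performs: the UNet output for the doubled batch is
# split into its unconditional and text-conditioned halves, which are recombined with
# the guidance weight. Shapes and the guidance_scale value are illustrative assumptions.
import torch

guidance_scale = 7.5  # assumed value; > 1.0 enables guidance in the code above
noise_pred = torch.randn(2, 4, 64, 64)  # stand-in for the UNet's doubled-batch output
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# with guidance_scale == 1.0 this reduces to the purely text-conditioned prediction
assert guided.shape == noise_pred_uncond.shape
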
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a__ )
class SCREAMING_SNAKE_CASE (a__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCAmelCase = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCAmelCase = Features({'''text''': Value('''string''' )} )
lowerCAmelCase = Features({'''summary''': Value('''string''' )} )
lowerCAmelCase = "text"
lowerCAmelCase = "summary"
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        return {self.text_column: "text", self.summary_column: "summary"}
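
# A minimal sketch of how the column mapping exposed above can be used: rename
# arbitrary source columns to the canonical "text"/"summary" pair before training.
# The dataset below is a plain dict and its column names are hypothetical.
column_mapping = {"article": "text", "highlights": "summary"}
example = {"article": "Long input document...", "highlights": "Short summary."}
canonical = {new: example[old] for old, new in column_mapping.items()}
assert canonical == {"text": "Long input document...", "summary": "Short summary."}
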
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : AutoencoderKL , _snake_case : CLIPTextModel , _snake_case : CLIPTokenizer , _snake_case : UNetaDConditionModel , _snake_case : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _snake_case : StableDiffusionSafetyChecker , _snake_case : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , unet=_snake_case , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , )
def _a ( self : List[Any] , _snake_case : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case )
def _a ( self : str ):
"""simple docstring"""
self.enable_attention_slicing(_snake_case )
@torch.no_grad()
def __call__( self : Optional[int] , _snake_case : Union[str, List[str]] , _snake_case : int = 5_12 , _snake_case : int = 5_12 , _snake_case : int = 50 , _snake_case : float = 7.5 , _snake_case : Optional[Union[str, List[str]]] = None , _snake_case : Optional[int] = 1 , _snake_case : float = 0.0 , _snake_case : Optional[torch.Generator] = None , _snake_case : Optional[torch.FloatTensor] = None , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , _snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _snake_case : int = 1 , _snake_case : Optional[torch.FloatTensor] = None , **_snake_case : List[str] , ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
A__ = 1
elif isinstance(_snake_case , _snake_case ):
A__ = len(_snake_case )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_snake_case )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_snake_case , _snake_case ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_snake_case )}.''' )
# get prompt text embeddings
A__ = self.tokenizer(
_snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
A__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
A__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A__ , A__ , A__ = text_embeddings.shape
A__ = text_embeddings.repeat(1 , _snake_case , 1 )
A__ = text_embeddings.view(bs_embed * num_images_per_prompt , _snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A__ = 42
if negative_prompt is None:
A__ = ['']
elif type(_snake_case ) is not type(_snake_case ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(_snake_case )} !='''
F''' {type(_snake_case )}.''' )
elif isinstance(_snake_case , _snake_case ):
A__ = [negative_prompt]
elif batch_size != len(_snake_case ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_snake_case )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
A__ = negative_prompt
A__ = text_input_ids.shape[-1]
A__ = self.tokenizer(
_snake_case , padding='max_length' , max_length=_snake_case , truncation=_snake_case , return_tensors='pt' , )
A__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A__ = uncond_embeddings.shape[1]
A__ = uncond_embeddings.repeat(_snake_case , _snake_case , 1 )
A__ = uncond_embeddings.view(batch_size * num_images_per_prompt , _snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A__ = torch.randn(
_snake_case , generator=_snake_case , device='cpu' , dtype=_snake_case ).to(self.device )
A__ = torch.randn(_snake_case , generator=_snake_case , device='cpu' , dtype=_snake_case ).to(
self.device )
else:
A__ = torch.randn(
_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case )
A__ = torch.randn(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
A__ = latents_reference.to(self.device )
A__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A__ = (latents_shape[3] - latents_shape_reference[3]) // 2
A__ = (latents_shape[2] - latents_shape_reference[2]) // 2
A__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A__ = 0 if dx < 0 else dx
A__ = 0 if dy < 0 else dy
A__ = max(-dx , 0 )
A__ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ = {}
if accepts_eta:
A__ = eta
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
A__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ = self.scheduler.scale_model_input(_snake_case , _snake_case )
# predict the noise residual
A__ = self.unet(_snake_case , _snake_case , encoder_hidden_states=_snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
A__ , A__ = noise_pred.chunk(2 )
A__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A__ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_snake_case , _snake_case , _snake_case )
A__ = 1 / 0.1_8215 * latents
A__ = self.vae.decode(_snake_case ).sample
A__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A__ = self.feature_extractor(self.numpy_to_pil(_snake_case ) , return_tensors='pt' ).to(
self.device )
A__ , A__ = self.safety_checker(
images=_snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A__ = None
if output_type == "pil":
A__ = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_snake_case , nsfw_content_detected=_snake_case )
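
# A standalone sketch of the latent re-centering trick above: noise drawn at a fixed
# reference resolution is pasted into the centre of the target latent tensor so the
# same seed yields similar compositions at different image sizes. The shapes below
# are illustrative assumptions.
import torch

latents_shape = (1, 4, 96, 64)            # target (height // 8, width // 8)
latents_shape_reference = (1, 4, 64, 64)  # fixed reference resolution
latents = torch.randn(latents_shape)
latents_reference = torch.randn(latents_shape_reference)

dx = (latents_shape[3] - latents_shape_reference[3]) // 2
dy = (latents_shape[2] - latents_shape_reference[2]) // 2
w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
tx = 0 if dx < 0 else dx  # top-left corner inside the target tensor
ty = 0 if dy < 0 else dy
dx = max(-dx, 0)          # top-left corner inside the reference tensor
dy = max(-dy, 0)
latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
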
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
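
# A small sketch of the label-index shift performed above: TensorFlow MobileNetV1
# checkpoints predict 1001 classes, index 0 being "background", so every ImageNet-1k
# label id is shifted up by one. The three labels below are a hypothetical excerpt.
idalabel = {0: "tench", 1: "goldfish", 2: "great white shark"}
idalabel = {int(k) + 1: v for k, v in idalabel.items()}
idalabel[0] = "background"
labelaid = {v: k for k, v in idalabel.items()}
assert idalabel[1] == "tench" and labelaid["background"] == 0
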
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( __snake_case , __snake_case , __snake_case ):
def get_masked_lm_array(__snake_case ):
_UpperCamelCase = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_array(__snake_case ):
_UpperCamelCase = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_layer_array(__snake_case , __snake_case ):
_UpperCamelCase = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_attention_layer_array(__snake_case , __snake_case , __snake_case ):
_UpperCamelCase = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_UpperCamelCase = tf.train.load_variable(__snake_case , __snake_case )
_UpperCamelCase = array.reshape(__snake_case )
if "kernel" in name:
_UpperCamelCase = array.transpose()
return torch.from_numpy(__snake_case )
print(f"""Loading model based on config from {config_path}...""" )
_UpperCamelCase = BertConfig.from_json_file(__snake_case )
_UpperCamelCase = BertForMaskedLM(__snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_UpperCamelCase = model.bert.encoder.layer[layer_index]
# Self-attention
_UpperCamelCase = layer.attention.self
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
_UpperCamelCase = layer.attention.output
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
_UpperCamelCase = get_encoder_attention_layer_array(
__snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_attention_layer_norm/gamma''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
_UpperCamelCase = layer.intermediate
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_intermediate_dense/kernel''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_intermediate_dense/bias''' )
# Output
_UpperCamelCase = layer.output
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_dense/kernel''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_dense/bias''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_layer_norm/gamma''' )
_UpperCamelCase = get_encoder_layer_array(__snake_case , '''_output_layer_norm/beta''' )
# Embeddings
_UpperCamelCase = get_encoder_array('''_position_embedding_layer/embeddings''' )
_UpperCamelCase = get_encoder_array('''_type_embedding_layer/embeddings''' )
_UpperCamelCase = get_encoder_array('''_embedding_norm_layer/gamma''' )
_UpperCamelCase = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
_UpperCamelCase = model.cls.predictions.transform
_UpperCamelCase = get_masked_lm_array('''dense/kernel''' )
_UpperCamelCase = get_masked_lm_array('''dense/bias''' )
_UpperCamelCase = get_masked_lm_array('''layer_norm/gamma''' )
_UpperCamelCase = get_masked_lm_array('''layer_norm/beta''' )
_UpperCamelCase = get_masked_lm_array('''embedding_table''' )
# Pooling
_UpperCamelCase = BertPooler(config=__snake_case )
_UpperCamelCase = get_encoder_array('''_pooler_layer/kernel''' )
_UpperCamelCase = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(__snake_case )
# Integration test - should load without any errors ;)
_UpperCamelCase = BertForMaskedLM.from_pretrained(__snake_case )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
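
# A minimal sketch of the kernel-transposition convention applied by the loaders above:
# TensorFlow stores dense kernels as (in_features, out_features), while a PyTorch
# nn.Linear weight is (out_features, in_features), so 2-D "kernel" arrays are transposed
# before being wrapped in a tensor. The array below is a dummy stand-in for the output
# of tf.train.load_variable.
import numpy as np
import torch

tf_kernel = np.arange(12, dtype=np.float32).reshape(3, 4)  # (in_features, out_features)
pt_weight = torch.from_numpy(tf_kernel.transpose())        # (out_features, in_features)
assert pt_weight.shape == (4, 3)
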
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
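
# A short demonstration of the sklearn call wrapped above, using the same multiclass
# example as the docstring to show how the `average` argument changes the result.
from sklearn.metrics import recall_score

references = [0, 1, 2, 0, 1, 2]
predictions = [0, 2, 1, 0, 0, 1]
print(recall_score(references, predictions, average=None))     # [1. 0. 0.] per class
print(recall_score(references, predictions, average="macro"))  # 0.3333...
print(recall_score(references, predictions, average="micro"))  # 0.3333...
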
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline
__lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'}
def a__ (self ) -> List[Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def a__ (self , A , A=0 ) -> List[Any]:
"""simple docstring"""
if str(A ).startswith('''mps''' ):
_a = torch.manual_seed(A )
else:
_a = torch.Generator(device=A ).manual_seed(A )
_a = floats_tensor((1, 3, 16, 16) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__ (self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def a__ (self ) -> str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ (self ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def a__ (self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
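
# A by-hand check of the binary MCC definition described above,
#     MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)),
# compared against the sklearn function the metric wraps. The labels are illustrative.
from math import sqrt
from sklearn.metrics import matthews_corrcoef

references = [1, 1, 1, 0, 0, 0]
predictions = [1, 1, 0, 0, 0, 1]
tp = sum(r == p == 1 for r, p in zip(references, predictions))
tn = sum(r == p == 0 for r, p in zip(references, predictions))
fp = sum(r == 0 and p == 1 for r, p in zip(references, predictions))
fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))
mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
assert abs(mcc - matthews_corrcoef(references, predictions)) < 1e-12  # both 1/3 here
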
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = 'convbert'
def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : Dict = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Dict = type_vocab_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
lowercase__ : Tuple = embedding_size
lowercase__ : List[str] = head_ratio
lowercase__ : Dict = conv_kernel_size
lowercase__ : Dict = num_groups
lowercase__ : int = classifier_dropout
class _snake_case ( UpperCAmelCase_ ):
@property
def lowercase__ ( self):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase__ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
])
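
# A plain-Python sketch of what the ONNX input specification above encodes: each input
# tensor maps axis indices to symbolic (dynamic) dimension names, with an extra "choice"
# axis for multiple-choice heads. The task strings follow the branch in the code above.
from collections import OrderedDict

def dynamic_inputs(task):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [
            ("input_ids", dynamic_axis),
            ("attention_mask", dynamic_axis),
            ("token_type_ids", dynamic_axis),
        ]
    )

assert list(dynamic_inputs("default")) == ["input_ids", "attention_mask", "token_type_ids"]
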
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
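
# A quick illustration of the cache-naming helper asserted on above: the filename is a
# deterministic hash of the URL (and optional ETag), so repeated downloads of the same
# resource resolve to the same cache entry. The URLs below are placeholders.
from datasets.utils.file_utils import hash_url_to_filename

a = hash_url_to_filename("http://www.mocksite.com/file1.txt", etag=None)
b = hash_url_to_filename("http://www.mocksite.com/file1.txt", etag=None)
c = hash_url_to_filename("http://www.mocksite.com/file2.txt", etag=None)
assert a == b and a != c  # deterministic and URL-sensitive
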
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape
__lowerCamelCase : Dict = jax.image.resize(
SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int = None
lowerCamelCase : float = 0.0
lowerCamelCase : bool = None
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : Tuple = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
__lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : int = nn.Dropout(self.dropout_prob )
__lowerCamelCase : Union[str, Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__lowerCamelCase : List[Any] = None
if use_nin_shortcut:
__lowerCamelCase : Any = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple:
__lowerCamelCase : List[Any] = hidden_states
__lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 )
__lowerCamelCase : Optional[int] = hidden_states + temb
__lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ )
if self.conv_shortcut is not None:
__lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ )
return hidden_states + residual
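
# A standalone check of the nearest-neighbour upsampling used by the first block above:
# jax.image.resize doubles the spatial dimensions of an NHWC tensor before the 3x3
# convolution runs. The input shape is an illustrative assumption.
import jax
import jax.numpy as jnp

hidden_states = jnp.arange(2 * 4 * 4 * 3, dtype=jnp.float32).reshape(2, 4, 4, 3)
batch, height, width, channels = hidden_states.shape
upsampled = jax.image.resize(
    hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
)
assert upsampled.shape == (2, 8, 8, 3)
# under nearest-neighbour resizing each source pixel fills a 2x2 output block
assert bool(jnp.all(upsampled[:, 0, 0, :] == upsampled[:, 1, 1, :]))
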
'''simple docstring'''
def solution ( _lowerCamelCase = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = _lowerCamelCase * (_lowerCamelCase + 1) * (2 * _lowerCamelCase + 1) / 6
    square_of_sum = (_lowerCamelCase * (_lowerCamelCase + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
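
# A brute-force cross-check of the closed-form expression above (the difference between
# the square of the sum and the sum of the squares of 1..n, as in Project Euler 6).
def _brute_force(n: int = 100) -> int:
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)

assert _brute_force(10) == 2640  # 55**2 - 385
assert _brute_force(100) == solution() == 25164150
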
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( __a : List[str] ) -> Tuple:
"""simple docstring"""
_a : List[str] = {}
_a : int = os.path.join(__a ,'''all_results.json''' )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
_a : Any = json.load(__a )
else:
raise ValueError(F"""can't find {path}""" )
return results
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[Any]:
import xla_spawn
_a : Union[str, Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
_a : List[str] = time()
xla_spawn.main()
_a : Tuple = time()
_a : Tuple = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0 )
def __lowercase ( self ) -> Dict:
import xla_spawn
_a : str = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
with patch.object(_a , '''argv''' , _a ):
xla_spawn.main()
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = row, column
__snake_case : Dict = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
__snake_case : Dict = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__snake_case : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__snake_case : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
__snake_case : str = f'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ : list[float] ) -> str:
nonlocal string_format_identifier
__snake_case : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self )
def lowercase__ ( self : Dict , __magic_name__ : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , __magic_name__ : tuple[int, int] ) -> Any:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
__snake_case : Optional[int] = value
def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
__snake_case : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) -> Matrix:
"""simple docstring"""
__snake_case : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = -self[r, c]
return result
def __sub__( self : Optional[int] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : List[Any] , __magic_name__ : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
__snake_case : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : Tuple = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
__snake_case : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__snake_case : Optional[int] = f'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
__snake_case : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : str = self[r, c]
return result
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__snake_case : List[str] = v.transpose()
__snake_case : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
__snake_case : Any = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__snake_case : Dict = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Union[str, Any] = 1, 2, -3
__snake_case : str = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}''' )
def _a ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
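
# A quick numerical check of the Sherman-Morrison identity implemented above,
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# using numpy and the same u, v vectors as the test driver; note the method above is
# called on an already-inverted matrix, hence the identity matrix as A here.
import numpy as np

A = np.eye(3)
u = np.array([[1.0], [2.0], [3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
A_inv = np.linalg.inv(A)
denom = 1.0 + (v.T @ A_inv @ u).item()  # 16.0, the `numerator_factor` guard above
sm = A_inv - (A_inv @ u @ v.T @ A_inv) / denom
assert np.allclose(sm, np.linalg.inv(A + u @ v.T))
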
import copy
import re
class A :
'''simple docstring'''
A__ = '''hp'''
A__ = {}
A__ = None
@classmethod
def lowerCamelCase__ (cls : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = prefix
lowercase__ = defaults
cls.build_naming_info()
@staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
@staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name
@staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
@classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info
@classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")

        return "_".join(name)
@classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
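
# Illustrative round-trip (hypothetical subclass; the exact short keys
# depend on collision handling at naming-info build time):
if __name__ == "__main__":
    class DemoNamer(TrialShortNamer):
        PREFIX = "run"
        DEFAULTS = {"learning_rate": 1e-3, "per_device_batch_size": 32}

    name = DemoNamer.shortname({"learning_rate": 1e-4, "per_device_batch_size": 32})
    print(name)                        # e.g. run_lr0.0001 (default-valued params are omitted)
    print(DemoNamer.parse_repr(name))  # recovers the full parameter dict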
| 15 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
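
# Note on the byte accounting above (illustrative): dtype_byte_size returns
# the number of bytes per element for a torch dtype, so a tensor's footprint
# is numel * dtype_byte_size(dtype), e.g.
#
#   dtype_byte_size(torch.float32)                               # -> 4
#   torch.zeros(4, 8).numel() * dtype_byte_size(torch.float32)   # -> 128 bytes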
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True when the matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* a v) / (v* v) for a Hermitian matrix a."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
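
# Illustrative property (not part of the original module): for a Hermitian
# matrix, the Rayleigh quotient is bounded by the extreme eigenvalues, e.g.
#
#   eigs = np.linalg.eigvalsh(a)          # ascending eigenvalues
#   assert eigs[0] <= rayleigh_quotient(a, v).item() <= eigs[-1]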
if __name__ == "__main__":
import doctest
doctest.testmod()
    tests()
 | 16 |
'''simple docstring'''
import cv2
import numpy as np
class _A :
    def __init__(self, k: float, window_size: int) -> None:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # the sensitivity factor validated in __init__
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
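    # For comparison, the same Harris response can be computed densely with
    # OpenCV's built-in (illustrative sketch; parameters mirror the manual
    # window loop above):
    #
    #   gray = np.float32(cv2.imread("path_to_image", 0))
    #   response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)
    #   corners = np.argwhere(response > 0.01 * response.max())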
| 26 | 0 |
import numpy as np
class lowerCamelCase_ :
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class lowerCamelCase_ :
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of cell (8-connectivity)."""
        neighbour_cord = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
for c in _closed:
if c == n:
continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
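    # Note: the h used above is the *squared* Euclidean distance. Admissible
    # alternatives (illustrative; names follow the astar function above):
    #
    #   h_euclid = math.dist(n.position, goal.position)   # needs `import math`
    #   h_manhattan = abs(x2 - x1) + abs(y2 - y1)         # for 4-connected grids
    #
    # The squared form inflates long distances, which speeds up the search
    # but can return non-optimal paths.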
| 17 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
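
# Illustrative usage (the checkpoint id is the standard public CLIP model):
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(
#       text=["a photo of a cat"], images=pil_image, return_tensors="pt", padding=True
#   )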
| 26 | 0 |
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
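    # Illustrative spot-checks (in addition to the doctest run above):
    assert bubble_sort([5, 1, 4, 2]) == [1, 2, 4, 5]
    assert bubble_sort([]) == []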
| 18 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_model,
            sas_tokenizer,
            question_doc,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
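
# To try the demo locally (illustrative; the index/memmap files referenced
# above must exist, and the filename is whatever this script is saved as):
#
#   streamlit run eli5_app.py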
| 26 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        image = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
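
# Illustrative usage sketch (checkpoint id and subfolders are assumptions;
# any unconditional image-space diffusion unet + scheduler pair should work):
#
#   from diffusers import DDIMScheduler, UNet2DModel
#   unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
#   scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet, scheduler)
#   out, timestep = pipe(image=pil_image, strength=0.6, num_inference_steps=50, return_dict=False)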
| 19 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 26 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 20 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
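    # Illustrative invocation (paths hypothetical, mirroring the argparse
    # defaults above):
    #
    #   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
    #       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json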
| 26 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
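
# Shape flow through the encoder above (illustrative): token ids (B, L) ->
# token + position embeddings (B, L, d_model) -> num_layers T5 blocks with an
# extended attention mask -> final layer norm + dropout -> (B, L, d_model),
# returned together with the original encoder_inputs_mask.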
| 21 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
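
# Typical invocations (path illustrative; flags are the two defined above):
#
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # bump back to a dev version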
| 26 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_a = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(lowerCAmelCase_ )
_a = torch.tensor([[1, 2]] , device=lowerCAmelCase_ )
_a = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_a = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ )
_a = torch.Size([1, 2, 7_68] )
_a = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCAmelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCAmelCase_ , atol=1e-3 ) )
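# --- Illustrative aside (not part of the original test file) ---
# The integration test above compares a slice of hidden states against
# hardcoded values with an absolute tolerance. A minimal, self-contained sketch
# of that check (assumes torch is importable, as the tests already require):
import torch as _torch_demo

_expected = _torch_demo.tensor([[-0.0653, 0.0950, -0.0061]])
_actual = _expected + 5e-4  # still within the test's atol of 1e-3
assert _torch_demo.allclose(_actual, _expected, atol=1e-3)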
| 22 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
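# --- Illustrative aside (not part of the original test file) ---
# The equivalence the tests above rely on: a list of records and its columnar
# transpose build datasets with the same schema (assumes `datasets` installed).
_records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
_by_list = Dataset.from_list(_records)
_by_dict = Dataset.from_dict({k: [r[k] for r in _records] for k in _records[0]})
assert _by_list.column_names == _by_dict.column_names
assert _by_list[0] == _by_dict[0]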
| 26 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs
        )
        return dataset

    def get_dataloader(self, type_path, batch_size, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None):
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
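# --- Illustrative aside (not part of the original script) ---
# A self-contained sketch of the label-smoothed NLL loss that `_step` above
# delegates to `label_smoothed_nll_loss` for. This mirrors the usual
# fairseq-style formulation; the function and variable names here are
# illustrative, not the actual utils API.
import torch


def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids."""
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss


_lp = torch.log_softmax(torch.randn(4, 10), dim=-1)
_tgt = torch.tensor([1, 2, 3, -100])  # -100 marks a padding position
_loss, _nll = _label_smoothed_nll_loss_sketch(_lp, _tgt, epsilon=0.1)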
| 23 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
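# --- Illustrative aside (not part of the original test file) ---
# The mechanism these tests exercise, reduced to its core and independent of
# `accelerate` (names here are illustrative): wrap a module's forward so a
# hook can edit the output, like PostForwardHook above.
def _add_simple_post_hook(module: nn.Module, post_forward) -> None:
    old_forward = module.forward

    def new_forward(*args, **kwargs):
        return post_forward(module, old_forward(*args, **kwargs))

    module.forward = new_forward


_lin = nn.Linear(2, 2)
_x = torch.randn(1, 2)
_before = _lin(_x)
_add_simple_post_hook(_lin, lambda module, output: output + 1)
assert torch.allclose(_lin(_x), _before + 1)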
| 26 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
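# --- Illustrative aside (not part of the original module) ---
# The combination rule implemented in `forward` above, in isolation: each
# transformer's *residual* (output minus input) is mixed, then the input is
# added back (illustrative tensors only).
import torch

_input_states = torch.randn(1, 4, 8)
_res0, _res1 = torch.randn(1, 4, 8), torch.randn(1, 4, 8)
_mix_ratio = 0.5
_out = _res0 * _mix_ratio + _res1 * (1 - _mix_ratio) + _input_states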
| 24 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
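# --- Illustrative aside (not part of the original script) ---
# The heuristic grid built above is the Manhattan distance to the goal, with a
# flat penalty of 99 on obstacle cells. A tiny standalone check of the formula
# for a hypothetical goal at (4, 5):
assert abs(1 - 4) + abs(2 - 5) == 6  # h(1, 2) when the goal is (4, 5)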
| 26 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 25 |
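# --- Illustrative aside (not part of the original tokenizer file above) ---
# The special-token layout the BigBird methods above produce for a sequence
# pair is [CLS] A [SEP] B [SEP]; the special-tokens mask marks only the added
# tokens. A tiny standalone check of that arithmetic (example ids are made up):
_token_ids_0, _token_ids_1 = [11, 12], [21, 22, 23]
_mask = [1] + [0] * len(_token_ids_0) + [1] + [0] * len(_token_ids_1) + [1]
assert _mask == [1, 0, 0, 1, 0, 0, 0, 1]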
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit from `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_string = str(abs(num))
        num_transpositions = [list(num_string) for char in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(transposition)) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Contains the full pipeline of the SHA1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
import doctest
doctest.testmod()
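# --- Illustrative aside (not part of the original module) ---
# Cross-check the pure-Python implementation above against hashlib
# (`SHA1Hash` is the class name as fixed above):
assert SHA1Hash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()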
| 27 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
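# --- Illustrative aside (not part of the original module) ---
# Worked example for a tree of height log2(4) = 2 with leaves [3, 5, 2, 9]:
# the maximizer gets max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3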
| 26 | 0 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
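# --- Illustrative aside (not part of the original module) ---
# Example run of the recursion above (longest non-decreasing subsequence):
assert longest_subsequence([10, 22, 9, 33, 21, 50]) == [10, 22, 33, 50]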
| 28 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
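# --- Illustrative aside (not part of the original module) ---
# Slowsort sorts in place, recursing on both halves before fixing the maximum:
_seq = [5, 3, 1, 4, 2]
slowsort(_seq)
assert _seq == [1, 2, 3, 4, 5]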
| 26 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
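# --- Illustrative aside (not part of the original module) ---
# The attention used in `Atten` above, in isolation: a softmax over
# temperature-scaled cosine similarities (illustrative shapes only).
_cos = torch.nn.CosineSimilarity(3, 1e-08)
_softmax = torch.nn.Softmax(dim=1)
_q = torch.randn(2, 4, 1, 8)  # query token embeddings
_s = torch.randn(2, 1, 6, 8)  # support token embeddings
_scores = _softmax(1 * _cos(_q, _s))  # -> shape (2, 4, 6)
assert _scores.shape == (2, 4, 6)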
| 29 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
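# A minimal standalone sketch of the DocTestSuite pattern used above (the
# module and helper names here are illustrative): collect a module's doctests
# into a unittest suite, run them, and require zero failures.
import doctest
import unittest

def run_module_doctests(module):
    suite = doctest.DocTestSuite(module)
    result = unittest.TextTestRunner().run(suite)
    return len(result.failures) == 0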
| 26 | 0 |
from __future__ import annotations
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = 0.00
UpperCAmelCase_ : Tuple = 0
for resistor in resistors:
if resistor <= 0:
UpperCAmelCase_ : List[Any] = f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(_lowercase )
        first_sum += 1 / float(resistor)
index += 1
return 1 / first_sum
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 0.00
UpperCAmelCase_ : str = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCAmelCase_ : Tuple = f'''Resistor at index {index} has a negative value!'''
raise ValueError(_lowercase )
index += 1
return sum_r
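# Quick numerical sketch of the two formulas implemented above (written
# standalone, since the masked function names shadow each other): parallel
# resistance is the reciprocal of the summed reciprocals, series resistance is
# the plain sum. Two 4-ohm resistors give 2 ohms in parallel, 8 ohms in series.
parallel = 1 / sum(1 / r for r in (4.0, 4.0))
series = sum((4.0, 4.0))
assert (parallel, series) == (2.0, 8.0)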
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 30 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
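# Numeric sketch of the classifier-free-guidance update applied in the
# denoising loop above (values arbitrary): the final noise estimate
# extrapolates from the unconditional prediction toward the text-conditioned
# one by the guidance scale.
_uncond = torch.tensor([0.0, 1.0])
_text = torch.tensor([1.0, 1.0])
_guided = _uncond + 7.5 * (_text - _uncond)
assert torch.equal(_guided, torch.tensor([7.5, 1.0]))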
| 26 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = torch.exp(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.sum(__UpperCAmelCase , dim=1 ) # sum of exp(x_i)
SCREAMING_SNAKE_CASE_ = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(__UpperCAmelCase ) - B / A
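# A quick sanity check of the closed form above (illustrative names; the masked
# arguments all print as the same placeholder): for logits x and p = softmax(x),
# the entropy -sum_i p_i * log(p_i) equals
# log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i)).
x = torch.randn(2, 5)
p = torch.softmax(x, dim=1)
direct = -(p * p.log()).sum(dim=1)
exp_x = torch.exp(x)
z = exp_x.sum(dim=1)
closed_form = torch.log(z) - (x * exp_x).sum(dim=1) / z
assert torch.allclose(direct, closed_form, atol=1e-5)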
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : List[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE_ = config.output_attentions
SCREAMING_SNAKE_CASE_ = config.output_hidden_states
SCREAMING_SNAKE_CASE_ = nn.ModuleList([BertLayer(_lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
SCREAMING_SNAKE_CASE_ = nn.ModuleList([BertHighway(_lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
SCREAMING_SNAKE_CASE_ = [-1 for _ in range(config.num_hidden_layers )]
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Optional[Any] ):
if (type(_lowerCAmelCase ) is float) or (type(_lowerCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
SCREAMING_SNAKE_CASE_ = x
else:
SCREAMING_SNAKE_CASE_ = x
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , ):
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_ = layer_module(
_lowerCAmelCase , _lowerCAmelCase , head_mask[i] , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = layer_outputs[0]
if self.output_attentions:
SCREAMING_SNAKE_CASE_ = all_attentions + (layer_outputs[1],)
SCREAMING_SNAKE_CASE_ = (hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ = current_outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE_ = current_outputs + (all_attentions,)
SCREAMING_SNAKE_CASE_ = self.highway[i](_lowerCAmelCase )
# logits, pooled_output
if not self.training:
SCREAMING_SNAKE_CASE_ = highway_exit[0]
SCREAMING_SNAKE_CASE_ = entropy(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
SCREAMING_SNAKE_CASE_ = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
SCREAMING_SNAKE_CASE_ = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_lowerCAmelCase , i + 1 )
else:
SCREAMING_SNAKE_CASE_ = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_ = (hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ = outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE_ = outputs + (all_attentions,)
SCREAMING_SNAKE_CASE_ = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
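# Standalone sketch (hypothetical names) of the exception-based early-exit
# control flow used above: each layer's highway head yields an entropy, and
# once it drops below that layer's threshold the forward pass is aborted by
# raising an exception that carries the exit payload and layer index.
class EarlyExit(Exception):
    def __init__(self, payload, exit_layer):
        self.payload = payload
        self.exit_layer = exit_layer

def forward_with_exits(layer_entropies, thresholds):
    # layer_entropies[i] stands in for entropy(highway_logits) at layer i
    for i, ent in enumerate(layer_entropies):
        if ent < thresholds[i]:
            raise EarlyExit(payload=f"exit@{i}", exit_layer=i + 1)
    return "ran all layers"

try:
    forward_with_exits([0.9, 0.4, 0.05], thresholds=[0.1, 0.1, 0.1])
except EarlyExit as exc:
    assert exc.exit_layer == 3  # the third layer was confident enough to stop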
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , _SCREAMING_SNAKE_CASE , )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : Tuple ):
super().__init__(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = BertEmbeddings(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = DeeBertEncoder(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BertPooler(_lowerCAmelCase )
self.init_weights()
def lowerCAmelCase_ ( self : int ):
self.encoder.init_highway_pooler(self.pooler )
def lowerCAmelCase_ ( self : Optional[int] ):
return self.embeddings.word_embeddings
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = value
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Tuple ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_lowerCAmelCase )
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : str=None , _lowerCAmelCase : int=None , _lowerCAmelCase : str=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE_ = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
SCREAMING_SNAKE_CASE_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE_ = torch.ones(_lowerCAmelCase , device=_lowerCAmelCase )
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ = torch.ones(_lowerCAmelCase , device=_lowerCAmelCase )
if token_type_ids is None:
SCREAMING_SNAKE_CASE_ = torch.zeros(_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE_ = self.get_extended_attention_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
SCREAMING_SNAKE_CASE_ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
SCREAMING_SNAKE_CASE_ = encoder_attention_mask[:, None, None, :]
SCREAMING_SNAKE_CASE_ = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
SCREAMING_SNAKE_CASE_ = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE_ = self.get_head_mask(_lowerCAmelCase , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE_ = self.embeddings(
input_ids=_lowerCAmelCase , position_ids=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , inputs_embeds=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.encoder(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , head_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = message
SCREAMING_SNAKE_CASE_ = exit_layer # start from 1!
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : int ):
super().__init__()
SCREAMING_SNAKE_CASE_ = BertPooler(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.num_labels )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Optional[Any] ):
# Pooler
SCREAMING_SNAKE_CASE_ = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase )
# "return" pooler_output
# BertModel
SCREAMING_SNAKE_CASE_ = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
SCREAMING_SNAKE_CASE_ = bmodel_output[1]
SCREAMING_SNAKE_CASE_ = self.dropout(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.classifier(_lowerCAmelCase )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , _SCREAMING_SNAKE_CASE , )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : List[str] ):
super().__init__(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = config.num_labels
SCREAMING_SNAKE_CASE_ = config.num_hidden_layers
SCREAMING_SNAKE_CASE_ = DeeBertModel(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : int=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : int=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[str]=-1 , _lowerCAmelCase : List[str]=False , ):
SCREAMING_SNAKE_CASE_ = self.num_layers
try:
SCREAMING_SNAKE_CASE_ = self.bert(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , position_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase , inputs_embeds=_lowerCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
SCREAMING_SNAKE_CASE_ = outputs[1]
SCREAMING_SNAKE_CASE_ = self.dropout(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.classifier(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
SCREAMING_SNAKE_CASE_ = e.message
SCREAMING_SNAKE_CASE_ = e.exit_layer
SCREAMING_SNAKE_CASE_ = outputs[0]
if not self.training:
SCREAMING_SNAKE_CASE_ = entropy(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE_ = MSELoss()
SCREAMING_SNAKE_CASE_ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
SCREAMING_SNAKE_CASE_ = []
for highway_exit in outputs[-1]:
SCREAMING_SNAKE_CASE_ = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowerCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE_ = MSELoss()
SCREAMING_SNAKE_CASE_ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowerCAmelCase )
if train_highway:
SCREAMING_SNAKE_CASE_ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
SCREAMING_SNAKE_CASE_ = (loss,) + outputs
if not self.training:
SCREAMING_SNAKE_CASE_ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
SCREAMING_SNAKE_CASE_ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 31 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
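# For orientation on the configuration below: the final property of the class
# multiplies the conv strides of the feature extractor together. With the
# default strides (5, 2, 2, 2, 2, 2, 2) each output frame therefore covers
# 5 * 2**6 = 320 input samples, i.e. 20 ms of 16 kHz audio (quick check):
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320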
class __UpperCamelCase ( A__ ):
__A : Union[str, Any] = """wavlm"""
def __init__( self , _UpperCamelCase=32 , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=3072 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-5 , _UpperCamelCase="group" , _UpperCamelCase="gelu" , _UpperCamelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCamelCase=False , _UpperCamelCase=128 , _UpperCamelCase=16 , _UpperCamelCase=320 , _UpperCamelCase=800 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=0.05 , _UpperCamelCase=10 , _UpperCamelCase=2 , _UpperCamelCase=0.0 , _UpperCamelCase=10 , _UpperCamelCase=320 , _UpperCamelCase=2 , _UpperCamelCase=0.1 , _UpperCamelCase=100 , _UpperCamelCase=256 , _UpperCamelCase=256 , _UpperCamelCase=0.1 , _UpperCamelCase="mean" , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=256 , _UpperCamelCase=(512, 512, 512, 512, 1500) , _UpperCamelCase=(5, 3, 3, 1, 1) , _UpperCamelCase=(1, 2, 3, 1, 1) , _UpperCamelCase=512 , _UpperCamelCase=80 , _UpperCamelCase=0 , _UpperCamelCase=1 , _UpperCamelCase=2 , _UpperCamelCase=False , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(_UpperCamelCase )
_UpperCAmelCase = list(_UpperCamelCase )
_UpperCAmelCase = list(_UpperCamelCase )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_buckets
_UpperCAmelCase = max_bucket_distance
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_ctc_classes
_UpperCAmelCase = vocab_size
_UpperCAmelCase = do_stable_layer_norm
_UpperCAmelCase = use_weighted_layer_sum
_UpperCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase = num_codevectors_per_group
_UpperCAmelCase = num_codevector_groups
_UpperCAmelCase = contrastive_logits_temperature
_UpperCAmelCase = num_negatives
_UpperCAmelCase = codevector_dim
_UpperCAmelCase = proj_codevector_dim
_UpperCAmelCase = diversity_loss_weight
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# adapter
_UpperCAmelCase = add_adapter
_UpperCAmelCase = adapter_kernel_size
_UpperCAmelCase = adapter_stride
_UpperCAmelCase = num_adapter_layers
_UpperCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase = list(_UpperCamelCase )
_UpperCAmelCase = list(_UpperCamelCase )
_UpperCAmelCase = list(_UpperCamelCase )
_UpperCAmelCase = xvector_output_dim
@property
def UpperCamelCase( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 32 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
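# Illustration of the name parsing above: "mobilenet_v1_<depth>_<size>" yields
# the depth multiplier and the input resolution (variable names here are mine):
matches_demo = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
assert matches_demo is not None
assert (float(matches_demo[1]), int(matches_demo[2])) == (0.75, 192)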
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 26 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = 'trocr'
__lowercase : Optional[Any] = ['past_key_values']
__lowercase : Union[str, Any] = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self:Optional[Any] , _a:Optional[int]=5_02_65 , _a:List[Any]=10_24 , _a:Union[str, Any]=12 , _a:Any=16 , _a:int=40_96 , _a:int="gelu" , _a:Union[str, Any]=5_12 , _a:str=0.1 , _a:Dict=0.0 , _a:int=0.0 , _a:int=2 , _a:Union[str, Any]=0.02 , _a:List[Any]=0.0 , _a:Any=True , _a:Optional[Any]=False , _a:Union[str, Any]=True , _a:List[Any]=True , _a:Any=1 , _a:str=0 , _a:Optional[int]=2 , **_a:int , ):
snake_case__ = vocab_size
snake_case__ = d_model
snake_case__ = decoder_layers
snake_case__ = decoder_attention_heads
snake_case__ = decoder_ffn_dim
snake_case__ = activation_function
snake_case__ = max_position_embeddings
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = init_std
snake_case__ = decoder_layerdrop
snake_case__ = use_cache
snake_case__ = scale_embedding
snake_case__ = use_learned_position_embeddings
snake_case__ = layernorm_embedding
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , **_a , )
| 33 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
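# Plain-NumPy sketch of the `shard` call used in the tests below (this mirrors
# the reshape semantics of flax's helper, not its actual implementation): the
# leading batch axis is split into (num_devices, batch_per_device) so that
# `pmap` can hand each device one slice.
def shard_like(x, num_devices):
    return x.reshape((num_devices, -1) + x.shape[1:])

_batch = np.arange(8 * 3).reshape(8, 3)
assert shard_like(_batch, num_devices=4).shape == (4, 2, 3)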
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCamelCase = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_ , cache_dir=lowerCamelCase_)
UpperCamelCase = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_ , os.listdir(lowerCamelCase_)[0] , '''snapshots'''))]
UpperCamelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''') for f in files)
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 4
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.151_4745) < 1e-3
assert np.abs(np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 4_9947.875) < 5e-1
UpperCamelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(lowerCamelCase_) == num_samples
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0565_2401)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 238_3808.2)) < 5e-1
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0400_3906)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 237_3516.75)) < 5e-1
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0400_3906)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 237_3516.75)) < 5e-1
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , )
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , )
UpperCamelCase = scheduler.create_state()
UpperCamelCase = scheduler_state
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_4504_3945)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 234_7693.5)) < 5e-1
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = jax.random.split(jax.random.PRNGKey(0) , lowerCamelCase_)
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , )
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
UpperCamelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , use_memory_efficient_attention=lowerCamelCase_ , )
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
UpperCamelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
| 34 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
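# Hand check of the binary-case formula behind matthews_corrcoef:
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN)).
# The labels below give TP=2, FN=1, FP=1, TN=1, so MCC = 1 / 6.
from math import sqrt

refs = [1, 1, 1, 0, 0]
preds = [1, 1, 0, 1, 0]
tp, tn, fp, fn = 2, 1, 1, 1
mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
assert abs(matthews_corrcoef(refs, preds) - mcc) < 1e-9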
| 26 | 0 |
import sys
def a ( A__ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = len(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = [[0 for x in range(A__ )] for x in range(A__ )]
SCREAMING_SNAKE_CASE__ : str = [[0 for x in range(A__ )] for x in range(A__ )]
for chain_length in range(2 , A__ ):
for a in range(1 , n - chain_length + 1 ):
SCREAMING_SNAKE_CASE__ : List[str] = a + chain_length - 1
SCREAMING_SNAKE_CASE__ : Any = sys.maxsize
for c in range(A__ , A__ ):
SCREAMING_SNAKE_CASE__ : Dict = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
SCREAMING_SNAKE_CASE__ : Optional[int] = cost
SCREAMING_SNAKE_CASE__ : int = c
return matrix, sol
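# Cross-check of the DP above with a short memoized recursion (illustrative
# only). For the textbook dimension array [30, 35, 15, 5, 10, 20, 25] (six
# matrices) the minimum scalar-multiplication count is 15125.
from functools import lru_cache

def min_mults(dims):
    @lru_cache(maxsize=None)
    def best(i, j):  # cheapest cost to multiply matrices i..j of the chain
        if i == j:
            return 0
        return min(
            best(i, k) + best(k + 1, j) + dims[i - 1] * dims[k] * dims[j]
            for k in range(i, j)
        )

    return best(1, len(dims) - 1)

assert min_mults((30, 35, 15, 5, 10, 20, 25)) == 15125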
def a ( A__ , A__ , A__ ) -> List[str]:
'''simple docstring'''
if i == j:
print('''A''' + str(A__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(A__ , A__ , optimal_solution[i][j] )
print_optiomal_solution(A__ , optimal_solution[i][j] + 1 , A__ )
print(''')''' , end=''' ''' )
def a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
SCREAMING_SNAKE_CASE__ : List[str] = len(A__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = matrix_chain_order(A__ )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(A__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 35 |
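# A quick sanity check for matrix_chain_order above, assuming the function is
# importable from this module. The dimension chain [30, 35, 15, 5, 10, 20, 25]
# is the classic CLRS example, whose optimum is 15125 scalar multiplications
# with the parenthesization ((A1 (A2 A3)) ((A4 A5) A6)).
matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125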
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 26 | 0 |
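# Why test_download_manager_download can assert `parts[-1] == HASH`: with
# use_etag=False the cached filename is derived purely from the URL, so it is
# deterministic. A standalone check (hedged: relies only on the
# hash_url_to_filename helper the tests above already import):
assert hash_url_to_filename(URL, etag=None) == HASH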
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP image processor (resize, center crop, rescale, normalize)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 36 |
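# A minimal usage sketch for the CLIPImageProcessor above. The random array is
# just a stand-in for a real image; with the defaults the pipeline resizes the
# shortest edge to 224, center-crops to 224x224, rescales to [0, 1] and
# normalizes with the OpenAI CLIP mean/std.
import numpy as np

image_processor = CLIPImageProcessor()
dummy_image = (np.random.rand(256, 320, 3) * 255).astype(np.uint8)  # HWC uint8
batch = image_processor(images=dummy_image)
print(np.asarray(batch["pixel_values"][0]).shape)  # (3, 224, 224)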
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first ``n`` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 0 |
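# A quick check of solution() above: for n = 10 the sum of squares is 385 and
# the squared sum is 3025, so the difference is 2640; for the default n = 100
# the result is 25164150.
assert solution(10) == 2640
assert solution() == 25164150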
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost])
        adjancency[nodeb].append([nodea, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 37 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1) starts as the identity, since sherman_morrison assumes the
        # Matrix instance already holds A^(-1).
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 26 | 0 |
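# The update implemented by sherman_morrison above, written out (with the
# Matrix instance holding A^(-1) already):
#
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
#
# which is why the method returns None when 1 + v^T A^(-1) u == 0: that
# rank-one update makes the matrix singular.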
'''simple docstring'''
def solution() -> int:
    """Product of the digits d_1, d_10, d_100, ..., d_1000000 of
    Champernowne's constant 0.123456789101112... (Project Euler 40)."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 38 |
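# Known result for solution() above (Project Euler 40): the digits
# d_1, d_10, d_100, ..., d_1000000 of Champernowne's constant are
# 1, 1, 5, 3, 7, 2, 1, so the product is 1*1*5*3*7*2*1 = 210.
assert solution() == 210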
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: the literal "fc2" is always truthy, so these two conditions
        # effectively test only `"experts" not in key` (kept as upstream wrote it).
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert_idx=expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, expert_idx=None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 0 |
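# A hedged invocation sketch for the conversion script above; the script file
# name and the paths are illustrative placeholders (the argparse defaults above
# point at the original author's machine):
#
#   python convert_nllb_moe_checkpoint.py \
#       --nllb_moe_checkpoint_path /path/to/model_moe_54b/checkpoint_2_300000 \
#       --dtype float32 \
#       --pytorch_dump_folder_path /path/to/hf-converted-moe-54b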
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    """Build an n-qubit QFT circuit and return the measurement counts from
    the qasm simulator (10000 shots)."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
) | 39 |
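# Rough expectation for quantum_fourier_transform(3) above (hedged: requires a
# working qiskit + Aer install): the QFT of |000> is the uniform superposition,
# so the 10000 shots spread roughly evenly (~1250 each) over the eight
# bitstrings '000'..'111'.
counts = quantum_fourier_transform(3)
assert set(counts) <= {f"{i:03b}" for i in range(8)}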
'''simple docstring'''
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """
        k : empirically determined constant, usually in [0.04, 0.06]
        window_size : size of the neighbourhood considered for each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # corner response; uses the configured k rather than a hard-coded one
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 26 | 0 |
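# The per-window response computed in detect() above is the standard Harris
# measure: with the structure tensor M summed over the window W,
#
#   M = sum_{(x, y) in W} [[Ix^2, Ix*Iy], [Ix*Iy, Iy^2]]
#   R = det(M) - k * trace(M)^2
#
# R > 0 flags a corner, R < 0 an edge, and |R| small a flat region.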
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
| 40 |
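# A minimal usage sketch for MgpstrTokenizer above (hedged: assumes a local
# vocab.json mapping single characters and the special tokens to ids, e.g.
# {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}).
tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
print(tokenizer._tokenize("abc"))  # ['a', 'b', 'c'] -- character-level tokens
print(tokenizer.convert_tokens_to_ids(["a", "b", "c"]))  # e.g. [2, 3, 4]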
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 26 | 0 |
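# A minimal usage sketch for the CLIPProcessor above (hedged: downloads the
# public "openai/clip-vit-base-patch32" checkpoint from the Hub on first use;
# the blank PIL image is just a stand-in).
from PIL import Image

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']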