Dataset schema (column, dtype, observed range):

    code                      string   lengths 82 .. 54.1k
    code_codestyle            int64    0 .. 699
    style_context             string   lengths 111 .. 35.6k
    style_context_codestyle   int64    0 .. 699
    label                     int64    0 .. 1
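Each row below pairs a code sample with a style_context sample; each string cell is followed by its codestyle id, and the row ends with a binary label. As a minimal sketch, rows with this schema can be loaded and inspected with the Hugging Face datasets library; the repository id below is a placeholder, since this dump does not name the dataset.

    from datasets import load_dataset

    # NOTE: "<namespace>/<dataset-name>" is a placeholder; the dump does not
    # name the dataset, so substitute the real repository id before running.
    ds = load_dataset("<namespace>/<dataset-name>", split="train")

    row = ds[0]
    print(len(row["code"]))                # string, length 82 .. 54.1k
    print(row["code_codestyle"])           # int64 in 0 .. 699
    print(len(row["style_context"]))       # string, length 111 .. 35.6k
    print(row["style_context_codestyle"])  # int64 in 0 .. 699
    print(row["label"])                    # int64, 0 or 1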
code:

import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def __lowerCamelCase (UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=5 ):
    assert masked_input.count("<mask>" ) == 1
    SCREAMING_SNAKE_CASE = torch.tensor(tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) ).unsqueeze(0 )  # Batch size 1
    SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]  # The last hidden-state is the first element of the output tuple
    SCREAMING_SNAKE_CASE = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    SCREAMING_SNAKE_CASE = logits[0, masked_index, :]
    SCREAMING_SNAKE_CASE = logits.softmax(dim=0 )
    SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = prob.topk(k=UpperCAmelCase__ , dim=0 )
    SCREAMING_SNAKE_CASE = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(UpperCAmelCase__ ) )] )
    SCREAMING_SNAKE_CASE = tokenizer.mask_token
    SCREAMING_SNAKE_CASE = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
        SCREAMING_SNAKE_CASE = predicted_token_bpe.replace("\u2581" , " " )
        if " {0}".format(UpperCAmelCase__ ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(UpperCAmelCase__ ) , UpperCAmelCase__ ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(UpperCAmelCase__ , UpperCAmelCase__ ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs


_lowerCamelCase : Optional[Any] = CamembertTokenizer.from_pretrained('''camembert-base''')
_lowerCamelCase : str = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
_lowerCamelCase : str = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
code_codestyle: 403
style_context:

'''simple docstring'''

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


_snake_case : Tuple = logging.get_logger(__name__)

_snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

_snake_case : List[Any] = {
    'vocab_file': {
        'squeezebert/squeezebert-uncased': (
            'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
        ),
        'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
        'squeezebert/squeezebert-mnli-headless': (
            'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'squeezebert/squeezebert-uncased': (
            'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
        ),
        'squeezebert/squeezebert-mnli': (
            'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
        ),
        'squeezebert/squeezebert-mnli-headless': (
            'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
        ),
    },
}

_snake_case : Union[str, Any] = {
    'squeezebert/squeezebert-uncased': 512,
    'squeezebert/squeezebert-mnli': 512,
    'squeezebert/squeezebert-mnli-headless': 512,
}

_snake_case : Tuple = {
    'squeezebert/squeezebert-uncased': {'do_lower_case': True},
    'squeezebert/squeezebert-mnli': {'do_lower_case': True},
    'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}


class A ( _a ):
    lowercase_ = VOCAB_FILES_NAMES
    lowercase_ = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ = PRETRAINED_INIT_CONFIGURATION
    lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ = SqueezeBertTokenizer

    def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int:
        """simple docstring"""
        super().__init__(
            lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )

        _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars
        ):
            _a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) )
            _a = do_lower_case
            _a = strip_accents
            _a = tokenize_chinese_chars
            _a = normalizer_class(**lowerCAmelCase_ )

        _a = do_lower_case

    def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]:
        """simple docstring"""
        _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        _a = [self.sep_token_id]
        _a = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        _a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
        return tuple(lowerCAmelCase_ )
style_context_codestyle: 22
label: 0
code:

import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


A_ : str = ['small', 'medium', 'large']

A_ : Any = 'lm_head.decoder.weight'
A_ : int = 'lm_head.weight'


def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
    UpperCamelCase_: List[Any] = torch.load(UpperCAmelCase__ )
    UpperCamelCase_: Union[str, Any] = d.pop(UpperCAmelCase__ )
    os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
    torch.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )


if __name__ == "__main__":
    A_ : Any = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    A_ : Tuple = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        A_ : Optional[Any] = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
        A_ : Any = F'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
code_codestyle: 57
style_context:

'''simple docstring'''

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


_snake_case : Dict = logging.get_logger(__name__)


class A ( _a ):
    lowercase_ = ['pixel_values']

    def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None:
        """simple docstring"""
        super().__init__(**lowerCAmelCase_ )
        _a = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        _a = get_size_dict(lowerCAmelCase_ )
        _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' )

        _a = do_resize
        _a = do_rescale
        _a = do_normalize
        _a = do_center_crop
        _a = crop_size
        _a = size
        _a = resample
        _a = rescale_factor
        _a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        _a = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
        """simple docstring"""
        _a = get_size_dict(lowerCAmelCase_ )
        if "shortest_edge" in size:
            _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            _a = (size['''height'''], size['''width'''])
        else:
            raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
        return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray:
        """simple docstring"""
        _a = get_size_dict(lowerCAmelCase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray:
        """simple docstring"""
        return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
        """simple docstring"""
        return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature:
        """simple docstring"""
        _a = do_resize if do_resize is not None else self.do_resize
        _a = do_rescale if do_rescale is not None else self.do_rescale
        _a = do_normalize if do_normalize is not None else self.do_normalize
        _a = do_center_crop if do_center_crop is not None else self.do_center_crop
        _a = crop_size if crop_size is not None else self.crop_size
        _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ )
        _a = resample if resample is not None else self.resample
        _a = rescale_factor if rescale_factor is not None else self.rescale_factor
        _a = image_mean if image_mean is not None else self.image_mean
        _a = image_std if image_std is not None else self.image_std

        _a = size if size is not None else self.size
        _a = get_size_dict(lowerCAmelCase_ )

        if not is_batched(lowerCAmelCase_ ):
            _a = [images]

        if not valid_images(lowerCAmelCase_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

        # All transformations expect numpy arrays.
        _a = [to_numpy_array(lowerCAmelCase_ ) for image in images]

        if do_resize:
            _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]

        if do_center_crop:
            _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]

        if do_rescale:
            _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]

        if do_normalize:
            _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]

        _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]

        _a = {'''pixel_values''': images}
        return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
style_context_codestyle: 22
label: 0
"""simple docstring""" from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" lowerCAmelCase__ :str = [] lowerCAmelCase__ :str = [] lowerCAmelCase__ :str = [] for rt in rc.restypes: lowerCAmelCase__ :Optional[int] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) lowerCAmelCase__ :List[str] = {name: i for i, name in enumerate(_SCREAMING_SNAKE_CASE )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) lowerCAmelCase__ :int = torch.tensor( _SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['aatype'].device , ) lowerCAmelCase__ :List[Any] = torch.tensor( _SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['aatype'].device , ) lowerCAmelCase__ :List[str] = torch.tensor( _SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=protein['aatype'].device , ) lowerCAmelCase__ :Dict = protein['aatype'].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein lowerCAmelCase__ :Tuple = restype_atomaa_to_atomaa[protein_aatype] lowerCAmelCase__ :Optional[Any] = restype_atomaa_mask[protein_aatype] lowerCAmelCase__ :List[str] = residx_atomaa_mask lowerCAmelCase__ :Tuple = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowerCAmelCase__ :int = restype_atomaa_to_atomaa[protein_aatype] lowerCAmelCase__ :Union[str, Any] = residx_atomaa_to_atomaa.long() # create the corresponding mask lowerCAmelCase__ :Dict = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['aatype'].device ) for restype, restype_letter in enumerate(rc.restypes ): lowerCAmelCase__ :Optional[Any] = rc.restype_atoa[restype_letter] lowerCAmelCase__ :Union[str, Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: lowerCAmelCase__ :Tuple = rc.atom_order[atom_name] lowerCAmelCase__ :Union[str, Any] = 1 lowerCAmelCase__ :Optional[int] = restype_atomaa_mask[protein_aatype] lowerCAmelCase__ :Optional[Any] = residx_atomaa_mask return protein def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" lowerCAmelCase__ :Tuple = tree_map(lambda _SCREAMING_SNAKE_CASE : torch.tensor(_SCREAMING_SNAKE_CASE , device=batch['aatype'].device ) , _SCREAMING_SNAKE_CASE , np.ndarray ) lowerCAmelCase__ :List[Any] = tensor_tree_map(lambda _SCREAMING_SNAKE_CASE : np.array(_SCREAMING_SNAKE_CASE ) , make_atomaa_masks(_SCREAMING_SNAKE_CASE ) ) return out
code_codestyle: 93
style_context:

'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_snake_case : str = {
    'configuration_layoutlmv3': [
        'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LayoutLMv3Config',
        'LayoutLMv3OnnxConfig',
    ],
    'processing_layoutlmv3': ['LayoutLMv3Processor'],
    'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : List[str] = ['LayoutLMv3TokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Optional[int] = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Tuple = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : List[Any] = ['LayoutLMv3FeatureExtractor']
    _snake_case : Tuple = ['LayoutLMv3ImageProcessor']


if TYPE_CHECKING:
    from .configuration_layoutlmva import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMvaConfig,
        LayoutLMvaOnnxConfig,
    )
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmva import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
            TFLayoutLMvaModel,
            TFLayoutLMvaPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
        from .image_processing_layoutlmva import LayoutLMvaImageProcessor

else:
    import sys

    _snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 22
label: 0
code:

'''simple docstring'''

from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block


@dataclass
class _SCREAMING_SNAKE_CASE ( _a ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ = 42


class _SCREAMING_SNAKE_CASE ( nn.Module ):
    """simple docstring"""

    def __init__( self , _snake_case=3 , _snake_case=3 , _snake_case=("DownEncoderBlock2D",) , _snake_case=(64,) , _snake_case=2 , _snake_case=32 , _snake_case="silu" , _snake_case=True , ):
        """simple docstring"""
        super().__init__()
        __lowerCamelCase = layers_per_block

        __lowerCamelCase = torch.nn.Convad(
            lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )

        __lowerCamelCase = None
        __lowerCamelCase = nn.ModuleList([] )

        # down
        __lowerCamelCase = block_out_channels[0]
        for i, down_block_type in enumerate(lowerCAmelCase_ ):
            __lowerCamelCase = output_channel
            __lowerCamelCase = block_out_channels[i]
            __lowerCamelCase = i == len(lowerCAmelCase_ ) - 1

            __lowerCamelCase = get_down_block(
                lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
            self.down_blocks.append(lowerCAmelCase_ )

        # mid
        __lowerCamelCase = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )

        # out
        __lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1E-6 )
        __lowerCamelCase = nn.SiLU()

        __lowerCamelCase = 2 * out_channels if double_z else out_channels
        __lowerCamelCase = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )

        __lowerCamelCase = False

    def _lowerCamelCase ( self , _snake_case ):
        """simple docstring"""
        __lowerCamelCase = x
        __lowerCamelCase = self.conv_in(lowerCAmelCase_ )

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(_snake_case ):
                def custom_forward(*_snake_case ):
                    return module(*lowerCAmelCase_ )

                return custom_forward

            # down
            if is_torch_version('''>=''' , '''1.11.0''' ):
                for down_block in self.down_blocks:
                    __lowerCamelCase = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
                # middle
                __lowerCamelCase = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
            else:
                for down_block in self.down_blocks:
                    __lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
                # middle
                __lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )

        else:
            # down
            for down_block in self.down_blocks:
                __lowerCamelCase = down_block(lowerCAmelCase_ )

            # middle
            __lowerCamelCase = self.mid_block(lowerCAmelCase_ )

        # post-process
        __lowerCamelCase = self.conv_norm_out(lowerCAmelCase_ )
        __lowerCamelCase = self.conv_act(lowerCAmelCase_ )
        __lowerCamelCase = self.conv_out(lowerCAmelCase_ )

        return sample


class _SCREAMING_SNAKE_CASE ( nn.Module ):
    """simple docstring"""

    def __init__( self , _snake_case=3 , _snake_case=3 , _snake_case=("UpDecoderBlock2D",) , _snake_case=(64,) , _snake_case=2 , _snake_case=32 , _snake_case="silu" , _snake_case="group" , ):
        """simple docstring"""
        super().__init__()
        __lowerCamelCase = layers_per_block

        __lowerCamelCase = nn.Convad(
            lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )

        __lowerCamelCase = None
        __lowerCamelCase = nn.ModuleList([] )

        __lowerCamelCase = in_channels if norm_type == '''spatial''' else None

        # mid
        __lowerCamelCase = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )

        # up
        __lowerCamelCase = list(reversed(lowerCAmelCase_ ) )
        __lowerCamelCase = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(lowerCAmelCase_ ):
            __lowerCamelCase = output_channel
            __lowerCamelCase = reversed_block_out_channels[i]

            __lowerCamelCase = i == len(lowerCAmelCase_ ) - 1

            __lowerCamelCase = get_up_block(
                lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , )
            self.up_blocks.append(lowerCAmelCase_ )
            __lowerCamelCase = output_channel

        # out
        if norm_type == "spatial":
            __lowerCamelCase = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ )
        else:
            __lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1E-6 )
        __lowerCamelCase = nn.SiLU()
        __lowerCamelCase = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 )

        __lowerCamelCase = False

    def _lowerCamelCase ( self , _snake_case , _snake_case=None ):
        """simple docstring"""
        __lowerCamelCase = z
        __lowerCamelCase = self.conv_in(lowerCAmelCase_ )

        __lowerCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(_snake_case ):
                def custom_forward(*_snake_case ):
                    return module(*lowerCAmelCase_ )

                return custom_forward

            if is_torch_version('''>=''' , '''1.11.0''' ):
                # middle
                __lowerCamelCase = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
                __lowerCamelCase = sample.to(lowerCAmelCase_ )

                # up
                for up_block in self.up_blocks:
                    __lowerCamelCase = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
            else:
                # middle
                __lowerCamelCase = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ )
                __lowerCamelCase = sample.to(lowerCAmelCase_ )

                # up
                for up_block in self.up_blocks:
                    __lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ )
        else:
            # middle
            __lowerCamelCase = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
            __lowerCamelCase = sample.to(lowerCAmelCase_ )

            # up
            for up_block in self.up_blocks:
                __lowerCamelCase = up_block(lowerCAmelCase_ , lowerCAmelCase_ )

        # post-process
        if latent_embeds is None:
            __lowerCamelCase = self.conv_norm_out(lowerCAmelCase_ )
        else:
            __lowerCamelCase = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ )
        __lowerCamelCase = self.conv_act(lowerCAmelCase_ )
        __lowerCamelCase = self.conv_out(lowerCAmelCase_ )

        return sample


class _SCREAMING_SNAKE_CASE ( nn.Module ):
    """simple docstring"""

    def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case="random" , _snake_case=False , _snake_case=True ):
        """simple docstring"""
        super().__init__()
        __lowerCamelCase = n_e
        __lowerCamelCase = vq_embed_dim
        __lowerCamelCase = beta
        __lowerCamelCase = legacy

        __lowerCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )

        __lowerCamelCase = remap
        if self.remap is not None:
            self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
            __lowerCamelCase = self.used.shape[0]
            __lowerCamelCase = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                __lowerCamelCase = self.re_embed
                __lowerCamelCase = self.re_embed + 1
            print(
                F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                F'''Using {self.unknown_index} for unknown indices.''' )
        else:
            __lowerCamelCase = n_e

        __lowerCamelCase = sane_index_shape

    def _lowerCamelCase ( self , _snake_case ):
        """simple docstring"""
        __lowerCamelCase = inds.shape
        assert len(lowerCAmelCase_ ) > 1
        __lowerCamelCase = inds.reshape(ishape[0] , -1 )
        __lowerCamelCase = self.used.to(lowerCAmelCase_ )
        __lowerCamelCase = (inds[:, :, None] == used[None, None, ...]).long()
        __lowerCamelCase = match.argmax(-1 )
        __lowerCamelCase = match.sum(2 ) < 1
        if self.unknown_index == "random":
            __lowerCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            __lowerCamelCase = self.unknown_index
        return new.reshape(lowerCAmelCase_ )

    def _lowerCamelCase ( self , _snake_case ):
        """simple docstring"""
        __lowerCamelCase = inds.shape
        assert len(lowerCAmelCase_ ) > 1
        __lowerCamelCase = inds.reshape(ishape[0] , -1 )
        __lowerCamelCase = self.used.to(lowerCAmelCase_ )
        if self.re_embed > self.used.shape[0]:  # extra token
            __lowerCamelCase = 0  # simply set to zero
        __lowerCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ )
        return back.reshape(lowerCAmelCase_ )

    def _lowerCamelCase ( self , _snake_case ):
        """simple docstring"""
        __lowerCamelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
        __lowerCamelCase = z.view(-1 , self.vq_embed_dim )

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        __lowerCamelCase = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 )

        __lowerCamelCase = self.embedding(lowerCAmelCase_ ).view(z.shape )
        __lowerCamelCase = None
        __lowerCamelCase = None

        # compute loss for embedding
        if not self.legacy:
            __lowerCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            __lowerCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )

        # preserve gradients
        __lowerCamelCase = z + (z_q - z).detach()

        # reshape back to match original input shape
        __lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()

        if self.remap is not None:
            __lowerCamelCase = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            __lowerCamelCase = self.remap_to_used(lowerCAmelCase_ )
            __lowerCamelCase = min_encoding_indices.reshape(-1 , 1 )  # flatten

        if self.sane_index_shape:
            __lowerCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def _lowerCamelCase ( self , _snake_case , _snake_case ):
        """simple docstring"""
        if self.remap is not None:
            __lowerCamelCase = indices.reshape(shape[0] , -1 )  # add batch axis
            __lowerCamelCase = self.unmap_to_all(lowerCAmelCase_ )
            __lowerCamelCase = indices.reshape(-1 )  # flatten again

        # get quantized latent vectors
        __lowerCamelCase = self.embedding(lowerCAmelCase_ )

        if shape is not None:
            __lowerCamelCase = z_q.view(lowerCAmelCase_ )
            # reshape back to match original input shape
            __lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()

        return z_q


class _SCREAMING_SNAKE_CASE ( _a ):
    """simple docstring"""

    def __init__( self , _snake_case , _snake_case=False ):
        """simple docstring"""
        __lowerCamelCase = parameters
        __lowerCamelCase , __lowerCamelCase = torch.chunk(lowerCAmelCase_ , 2 , dim=1 )
        __lowerCamelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
        __lowerCamelCase = deterministic
        __lowerCamelCase = torch.exp(0.5 * self.logvar )
        __lowerCamelCase = torch.exp(self.logvar )
        if self.deterministic:
            __lowerCamelCase = __lowerCamelCase = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )

    def _lowerCamelCase ( self , _snake_case = None ):
        """simple docstring"""
        __lowerCamelCase = randn_tensor(
            self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
        __lowerCamelCase = self.mean + self.std * sample
        return x

    def _lowerCamelCase ( self , _snake_case=None ):
        """simple docstring"""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )

    def _lowerCamelCase ( self , _snake_case , _snake_case=[1, 2, 3] ):
        """simple docstring"""
        if self.deterministic:
            return torch.Tensor([0.0] )
        __lowerCamelCase = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ )

    def _lowerCamelCase ( self ):
        """simple docstring"""
        return self.mean
code_codestyle: 316
style_context:

'''simple docstring'''

import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class A ( _a ):
    lowercase_ = (DDPMParallelScheduler,)

    def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
        """simple docstring"""
        _a = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }

        config.update(**lowerCAmelCase_ )
        return config

    def __lowerCAmelCase ( self : Dict ) -> Any:
        """simple docstring"""
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : int ) -> Optional[Any]:
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Any ) -> List[Any]:
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        self.check_over_configs(thresholding=lowerCAmelCase_ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )

    def __lowerCAmelCase ( self : Tuple ) -> str:
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : str ) -> List[str]:
        """simple docstring"""
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : str ) -> Optional[int]:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config()
        _a = scheduler_class(**lowerCAmelCase_ )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5

    def __lowerCAmelCase ( self : Dict ) -> str:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config()
        _a = scheduler_class(**lowerCAmelCase_ )

        _a = len(lowerCAmelCase_ )

        _a = self.dummy_model()
        _a = self.dummy_sample_deter
        _a = self.dummy_sample_deter + 0.1
        _a = self.dummy_sample_deter - 0.1

        _a = samplea.shape[0]
        _a = torch.stack([samplea, samplea, samplea] , dim=0 )
        _a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )

        _a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        _a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )

        _a = torch.sum(torch.abs(lowerCAmelCase_ ) )
        _a = torch.mean(torch.abs(lowerCAmelCase_ ) )

        assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
        assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3

    def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config()
        _a = scheduler_class(**lowerCAmelCase_ )
        _a = len(lowerCAmelCase_ )

        _a = self.dummy_model()
        _a = self.dummy_sample_deter
        _a = torch.manual_seed(0 )

        for t in reversed(range(lowerCAmelCase_ ) ):
            # 1. predict noise residual
            _a = model(lowerCAmelCase_ , lowerCAmelCase_ )

            # 2. predict previous mean of sample x_t-1
            _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample

            _a = pred_prev_sample

        _a = torch.sum(torch.abs(lowerCAmelCase_ ) )
        _a = torch.mean(torch.abs(lowerCAmelCase_ ) )

        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3

    def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config(prediction_type='''v_prediction''' )
        _a = scheduler_class(**lowerCAmelCase_ )
        _a = len(lowerCAmelCase_ )

        _a = self.dummy_model()
        _a = self.dummy_sample_deter
        _a = torch.manual_seed(0 )

        for t in reversed(range(lowerCAmelCase_ ) ):
            # 1. predict noise residual
            _a = model(lowerCAmelCase_ , lowerCAmelCase_ )

            # 2. predict previous mean of sample x_t-1
            _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample

            _a = pred_prev_sample

        _a = torch.sum(torch.abs(lowerCAmelCase_ ) )
        _a = torch.mean(torch.abs(lowerCAmelCase_ ) )

        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3

    def __lowerCAmelCase ( self : int ) -> Dict:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config()
        _a = scheduler_class(**lowerCAmelCase_ )

        _a = [1_00, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=lowerCAmelCase_ )

        _a = scheduler.timesteps

        for i, timestep in enumerate(lowerCAmelCase_ ):
            if i == len(lowerCAmelCase_ ) - 1:
                _a = -1
            else:
                _a = timesteps[i + 1]

            _a = scheduler.previous_timestep(lowerCAmelCase_ )
            _a = prev_t.item()

            self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Dict ) -> List[Any]:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config()
        _a = scheduler_class(**lowerCAmelCase_ )

        _a = [1_00, 87, 50, 51, 0]

        with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config()
        _a = scheduler_class(**lowerCAmelCase_ )

        _a = [1_00, 87, 50, 1, 0]
        _a = len(lowerCAmelCase_ )

        with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Dict ) -> Any:
        """simple docstring"""
        _a = self.scheduler_classes[0]
        _a = self.get_scheduler_config()
        _a = scheduler_class(**lowerCAmelCase_ )

        _a = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
style_context_codestyle: 22
label: 0
code:

import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple ) -> Any:
    '''simple docstring'''
    A = checkpoint
    A = {}

    A = vae_state_dict['encoder.conv_in.weight']
    A = vae_state_dict['encoder.conv_in.bias']
    A = vae_state_dict['encoder.conv_out.weight']
    A = vae_state_dict['encoder.conv_out.bias']
    A = vae_state_dict['encoder.norm_out.weight']
    A = vae_state_dict['encoder.norm_out.bias']

    A = vae_state_dict['decoder.conv_in.weight']
    A = vae_state_dict['decoder.conv_in.bias']
    A = vae_state_dict['decoder.conv_out.weight']
    A = vae_state_dict['decoder.conv_out.bias']
    A = vae_state_dict['decoder.norm_out.weight']
    A = vae_state_dict['decoder.norm_out.bias']

    A = vae_state_dict['quant_conv.weight']
    A = vae_state_dict['quant_conv.bias']
    A = vae_state_dict['post_quant_conv.weight']
    A = vae_state_dict['post_quant_conv.bias']

    # Retrieves the keys for the encoder down blocks only
    A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
    A = {
        layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(lowerCAmelCase__ )
    }

    # Retrieves the keys for the decoder up blocks only
    A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
    A = {
        layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(lowerCAmelCase__ )
    }

    for i in range(lowerCAmelCase__ ):
        A = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]

        if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
            A = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.weight''' )
            A = vae_state_dict.pop(
                F'''encoder.down.{i}.downsample.conv.bias''' )

        A = renew_vae_resnet_paths(lowerCAmelCase__ )
        A = {'old': F'''down.{i}.block''', 'new': F'''down_blocks.{i}.resnets'''}
        assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )

    A = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    A = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        A = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]

        A = renew_vae_resnet_paths(lowerCAmelCase__ )
        A = {'old': F'''mid.block_{i}''', 'new': F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )

    A = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    A = renew_vae_attention_paths(lowerCAmelCase__ )
    A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
    conv_attn_to_linear(lowerCAmelCase__ )

    for i in range(lowerCAmelCase__ ):
        A = num_up_blocks - 1 - i
        A = [
            key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
        ]

        if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
            A = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.weight'''
            ]
            A = vae_state_dict[
                F'''decoder.up.{block_id}.upsample.conv.bias'''
            ]

        A = renew_vae_resnet_paths(lowerCAmelCase__ )
        A = {'old': F'''up.{block_id}.block''', 'new': F'''up_blocks.{i}.resnets'''}
        assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )

    A = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    A = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        A = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]

        A = renew_vae_resnet_paths(lowerCAmelCase__ )
        A = {'old': F'''mid.block_{i}''', 'new': F'''mid_block.resnets.{i - 1}'''}
        assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )

    A = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    A = renew_vae_attention_paths(lowerCAmelCase__ )
    A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ )
    conv_attn_to_linear(lowerCAmelCase__ )

    return new_checkpoint


def lowerCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str , ) -> Union[str, Any]:
    '''simple docstring'''
    A = requests.get(
        ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
    A = io.BytesIO(r.content )

    A = OmegaConf.load(lowerCAmelCase__ )
    A = 512
    A = 'cuda' if torch.cuda.is_available() else 'cpu'

    if checkpoint_path.endswith('safetensors' ):
        from safetensors import safe_open

        A = {}
        with safe_open(lowerCAmelCase__ , framework='pt' , device='cpu' ) as f:
            for key in f.keys():
                A = f.get_tensor(lowerCAmelCase__ )
    else:
        A = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )['state_dict']

    # Convert the VAE model.
    A = create_vae_diffusers_config(lowerCAmelCase__ , image_size=lowerCAmelCase__ )
    A = custom_convert_ldm_vae_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ )

    A = AutoencoderKL(**lowerCAmelCase__ )
    vae.load_state_dict(lowerCAmelCase__ )
    vae.save_pretrained(lowerCAmelCase__ )


if __name__ == "__main__":
    __snake_case :Any =argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')

    __snake_case :Tuple =parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
code_codestyle: 106
style_context:

'''simple docstring'''

import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def snake_case_ (UpperCamelCase : dict ):
    '''simple docstring'''
    return (data["data"], data["target"])


def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ):
    '''simple docstring'''
    _a = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(UpperCamelCase , UpperCamelCase )
    # Predict target for test data
    _a = xgb.predict(UpperCamelCase )
    _a = predictions.reshape(len(UpperCamelCase ) , 1 )
    return predictions


def snake_case_ ():
    '''simple docstring'''
    _a = fetch_california_housing()
    _a , _a = data_handling(UpperCamelCase )
    _a , _a , _a , _a = train_test_split(
        UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 )
    _a = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase )
    # Error printing
    print(f'Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}' )
    print(f'Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}' )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
style_context_codestyle: 22
label: 0
code:

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {
    'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}


class _lowerCamelCase ( _a ):
    """simple docstring"""

    UpperCAmelCase_ : List[Any] ="mra"

    def __init__( self , UpperCAmelCase=50265 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=1 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-5 , UpperCAmelCase="absolute" , UpperCAmelCase=4 , UpperCAmelCase="full" , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , **UpperCAmelCase , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )

        __snake_case : Tuple = vocab_size
        __snake_case : Any = max_position_embeddings
        __snake_case : Optional[int] = hidden_size
        __snake_case : Optional[int] = num_hidden_layers
        __snake_case : str = num_attention_heads
        __snake_case : str = intermediate_size
        __snake_case : Union[str, Any] = hidden_act
        __snake_case : List[Any] = hidden_dropout_prob
        __snake_case : Optional[Any] = attention_probs_dropout_prob
        __snake_case : Dict = initializer_range
        __snake_case : Any = type_vocab_size
        __snake_case : Optional[Any] = layer_norm_eps
        __snake_case : int = position_embedding_type
        __snake_case : Tuple = block_per_row
        __snake_case : Union[str, Any] = approx_mode
        __snake_case : Dict = initial_prior_first_n_blocks
        __snake_case : List[Any] = initial_prior_diagonal_n_blocks
code_codestyle: 243
style_context:

'''simple docstring'''

import qiskit


def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
    '''simple docstring'''
    _a = qiskit.Aer.get_backend('''aer_simulator''' )

    _a = qiskit.QuantumCircuit(4 , 2 )

    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bita == 1:
        qc_ha.x(1 )
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value

    # Execute the circuit on the qasm simulator
    _a = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1000 )

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(UpperCamelCase )


if __name__ == "__main__":
    _snake_case : Tuple = half_adder(1, 1)
    print(F'''Half Adder Output Qubit Counts: {counts}''')
style_context_codestyle: 22
label: 0
code:

'''simple docstring'''

from collections.abc import Generator
from math import sin


def UpperCAmelCase_ ( lowerCamelCase_ ):
    """simple docstring"""
    if len(lowerCamelCase_ ) != 3_2:
        raise ValueError("Input must be of length 32" )

    lowerCAmelCase__ : Optional[Any] = B""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def UpperCAmelCase_ ( lowerCamelCase_ ):
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative" )

    lowerCAmelCase__ : List[str] = format(lowerCamelCase_ , "08x" )[-8:]
    lowerCAmelCase__ : Dict = B""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
    return little_endian_hex


def UpperCAmelCase_ ( lowerCamelCase_ ):
    """simple docstring"""
    lowerCAmelCase__ : Optional[Any] = B""
    for char in message:
        bit_string += format(lowerCamelCase_ , "08b" ).encode("utf-8" )
    lowerCAmelCase__ : List[Any] = format(len(lowerCamelCase_ ) , "064b" ).encode("utf-8" )

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(lowerCamelCase_ ) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )

    return bit_string


def UpperCAmelCase_ ( lowerCamelCase_ ):
    """simple docstring"""
    if len(lowerCamelCase_ ) % 5_1_2 != 0:
        raise ValueError("Input must have length that\'s a multiple of 512" )

    for pos in range(0 , len(lowerCamelCase_ ) , 5_1_2 ):
        lowerCAmelCase__ : Dict = bit_string[pos : pos + 5_1_2]
        lowerCAmelCase__ : Optional[int] = []
        for i in range(0 , 5_1_2 , 3_2 ):
            block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
        yield block_words


def UpperCAmelCase_ ( lowerCamelCase_ ):
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative" )

    lowerCAmelCase__ : Union[str, Any] = format(lowerCamelCase_ , "032b" )
    lowerCAmelCase__ : Any = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(lowerCamelCase_ , 2 )


def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    return (a + b) % 2**3_2


def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
    """simple docstring"""
    if i < 0:
        raise ValueError("Input must be non-negative" )
    if shift < 0:
        raise ValueError("Shift must be non-negative" )
    return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2


def UpperCAmelCase_ ( lowerCamelCase_ ):
    """simple docstring"""
    lowerCAmelCase__ : Tuple = preprocess(lowerCamelCase_ )

    lowerCAmelCase__ : str = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]

    # Starting states
    lowerCAmelCase__ : List[str] = 0x67_45_23_01
    lowerCAmelCase__ : List[str] = 0xef_cd_ab_89
    lowerCAmelCase__ : Optional[Any] = 0x98_ba_dc_fe
    lowerCAmelCase__ : Union[str, Any] = 0x10_32_54_76

    lowerCAmelCase__ : int = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(lowerCamelCase_ ):
        lowerCAmelCase__ : Dict = aa
        lowerCAmelCase__ : str = ba
        lowerCAmelCase__ : int = ca
        lowerCAmelCase__ : Tuple = da

        # Hash current chunk
        for i in range(6_4 ):
            if i <= 1_5:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                lowerCAmelCase__ : Union[str, Any] = d ^ (b & (c ^ d))
                lowerCAmelCase__ : Dict = i
            elif i <= 3_1:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                lowerCAmelCase__ : List[Any] = c ^ (d & (b ^ c))
                lowerCAmelCase__ : int = (5 * i + 1) % 1_6
            elif i <= 4_7:
                lowerCAmelCase__ : List[Any] = b ^ c ^ d
                lowerCAmelCase__ : Optional[int] = (3 * i + 5) % 1_6
            else:
                lowerCAmelCase__ : Tuple = c ^ (b | not_aa(lowerCamelCase_ ))
                lowerCAmelCase__ : Union[str, Any] = (7 * i) % 1_6
            lowerCAmelCase__ : Optional[Any] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
            lowerCAmelCase__ : List[str] = d
            lowerCAmelCase__ : List[str] = c
            lowerCAmelCase__ : Any = b
            lowerCAmelCase__ : Optional[int] = sum_aa(lowerCamelCase_ , left_rotate_aa(lowerCamelCase_ , shift_amounts[i] ) )

        # Add hashed chunk to running total
        lowerCAmelCase__ : Tuple = sum_aa(lowerCamelCase_ , lowerCamelCase_ )
        lowerCAmelCase__ : int = sum_aa(lowerCamelCase_ , lowerCamelCase_ )
        lowerCAmelCase__ : Dict = sum_aa(lowerCamelCase_ , lowerCamelCase_ )
        lowerCAmelCase__ : Dict = sum_aa(lowerCamelCase_ , lowerCamelCase_ )

    lowerCAmelCase__ : Union[str, Any] = reformat_hex(lowerCamelCase_ ) + reformat_hex(lowerCamelCase_ ) + reformat_hex(lowerCamelCase_ ) + reformat_hex(lowerCamelCase_ )
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 378
# Pure-Python MD5 implementation (de-obfuscated and re-indented).
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string to little endian (reverse its four 8-char groups)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as an 8-char little-endian hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT within 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits (XOR acts as OR here: no overlap)."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a 32-char hex byte string."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a, b, c, d = a0, b0, c0, d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
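As a quick sanity check for the reconstructed implementation above (a minimal sketch, assuming md5_me is in scope), the digest of b"hello" is a widely published reference value:

# Sanity check: compare against the well-known MD5 digest of "hello".
assert md5_me(b"hello") == b"5d41402abc4b2a76b9719d911017c592"
print("md5 sanity check passed")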
"""simple docstring""" def lowercase (_snake_case ,_snake_case ) -> Any: '''simple docstring''' __UpperCamelCase = (boundary[1] - boundary[0]) / steps __UpperCamelCase = boundary[0] __UpperCamelCase = boundary[1] __UpperCamelCase = make_points(_snake_case ,_snake_case ,_snake_case ) __UpperCamelCase = 0.0 y += (h / 2.0) * f(_snake_case ) for i in x_i: # print(i) y += h * f(_snake_case ) y += (h / 2.0) * f(_snake_case ) return y def lowercase (_snake_case ,_snake_case ,_snake_case ) -> List[str]: '''simple docstring''' __UpperCamelCase = a + h while x < (b - h): yield x __UpperCamelCase = x + h def lowercase (_snake_case ) -> Optional[Any]: # enter your function here '''simple docstring''' __UpperCamelCase = (x - 0) * (x - 0) return y def lowercase () -> Dict: '''simple docstring''' __UpperCamelCase = 0.0 # Lower bound of integration __UpperCamelCase = 1.0 # Upper bound of integration __UpperCamelCase = 1_0.0 # define number of steps or resolution __UpperCamelCase = [a, b] # define boundary of integration __UpperCamelCase = method_a(_snake_case ,_snake_case ) print(f"""y = {y}""" ) if __name__ == "__main__": main()
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A ( unittest.TestCase ): def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=18 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : Optional[int]=4_00 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , ) -> Optional[Any]: """simple docstring""" _a = size if size is not None else {'''height''': 18, '''width''': 18} _a = parent _a = batch_size _a = num_channels _a = image_size _a = min_resolution _a = max_resolution _a = do_resize _a = size _a = do_normalize def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4], [-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A ( _a ,unittest.TestCase ): lowercase_ = ImageGPTImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _a = ImageGPTImageProcessingTester(self ) @property def __lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) ) def __lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" _a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) _a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def __lowerCAmelCase ( self : str ) -> str: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) _a = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' ) image_processor_first.to_json_file(lowerCAmelCase_ ) _a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict() _a = image_processor_first.to_dict() for key, value in 
image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowerCAmelCase_ ) _a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict() _a = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCAmelCase_ ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" pass def snake_case_ (): '''simple docstring''' _a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) _a = Image.open(dataset[4]['''file'''] ) _a = Image.open(dataset[5]['''file'''] ) _a = [imagea, imagea] return images @require_vision @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) _a = prepare_images() # test non-batched _a = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 10_24) ) _a = [3_06, 1_91, 1_91] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ ) # test batched _a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 10_24) ) _a = [3_03, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treat the curve as a sequence of linear segments and sum the trapezoid areas."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _snake_case : Optional[Any] = 8 def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Dict=BITS ): '''simple docstring''' _a = x.device _a = (x * 255).int().clamp(0 , 255 ) _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase ) _a = rearrange(UpperCamelCase , '''d -> d 1 1''' ) _a = rearrange(UpperCamelCase , '''b c h w -> b c 1 h w''' ) _a = ((x & mask) != 0).float() _a = rearrange(UpperCamelCase , '''b c d h w -> b (c d) h w''' ) _a = bits * 2 - 1 return bits def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any=BITS ): '''simple docstring''' _a = x.device _a = (x > 0).int() _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa ) _a = rearrange(UpperCamelCase , '''d -> d 1 1''' ) _a = rearrange(UpperCamelCase , '''b (c d) h w -> b c d h w''' , d=8 ) _a = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' ) return (dec / 255).clamp(0.0 , 1.0 ) def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ): '''simple docstring''' if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _a = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _a = self.alphas_cumprod[timestep] _a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _a = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _a = self.bit_scale if self.config.clip_sample: _a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _a = self._get_variance(UpperCamelCase , UpperCamelCase ) _a = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu''' _a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase ) _a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise _a = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase ) def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ): '''simple docstring''' _a = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 ) else: _a = None # 1. compute alphas, betas _a = self.alphas_cumprod[t] _a = self.alphas_cumprod[t - 1] if t > 0 else self.one _a = 1 - alpha_prod_t _a = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _a = model_output else: raise ValueError(f'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" _a = self.bit_scale if self.config.clip_sample: _a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _a = 0 if t > 0: _a = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device ) _a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise _a = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase ) class A ( _a ): def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int: """simple docstring""" super().__init__() _a = bit_scale _a = ( ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) @torch.no_grad() def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" _a = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , ) _a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale _a = latents.to(self.device ) self.scheduler.set_timesteps(lowerCAmelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample # compute the previous noisy sample x_t -> x_t-1 _a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample _a = bits_to_decimal(lowerCAmelCase_ ) if output_type == "pil": _a = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_ )
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
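A minimal usage sketch for the config class above (hypothetical hyperparameter values); RoFormerConfig is importable from transformers at the top level:

# Hypothetical usage: build a small RoFormer config and inspect a field.
from transformers import RoFormerConfig

config = RoFormerConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=128, intermediate_size=256)
print(config.model_type)      # "roformer"
print(config.embedding_size)  # defaults to hidden_size -> 128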
import requests
from bs4 import BeautifulSoup  # fixed: the import must come from bs4, not "bsa"


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
403
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
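A small usage sketch, assuming the class names used in the cleaned-up sample above; a pair of weight-0 edges beating a direct weight-1 edge shows the deque trick at work:

# Hypothetical usage of the 0-1 BFS graph above.
g = AdjacencyList(3)
g.add_edge(0, 1, 0)  # free edge
g.add_edge(1, 2, 0)  # free edge
g.add_edge(0, 2, 1)  # direct, but costs 1
print(g.get_shortest_path(0, 2))  # 0, via the two weight-0 edges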
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class _lowerCAmelCase( _a ): """simple docstring""" def __get__( self , _lowerCamelCase , _lowerCamelCase=None ): if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute' ) UpperCamelCase_: str = '__cached_' + self.fget.__name__ UpperCamelCase_: List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if cached is None: UpperCamelCase_: Dict = self.fget(lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return cached def snake_case (UpperCAmelCase__ ) -> int: UpperCamelCase_: Optional[int] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'''invalid truth value {val!r}''' ) def snake_case (UpperCAmelCase__ ) -> int: if is_torch_fx_proxy(UpperCAmelCase__ ): return True if is_torch_available(): import torch if isinstance(UpperCAmelCase__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCAmelCase__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCAmelCase__ , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCAmelCase__ , np.ndarray ) def snake_case (UpperCAmelCase__ ) -> Any: return isinstance(UpperCAmelCase__ , np.ndarray ) def snake_case (UpperCAmelCase__ ) -> Optional[int]: return _is_numpy(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Tuple: import torch return isinstance(UpperCAmelCase__ , torch.Tensor ) def snake_case (UpperCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Optional[Any]: import torch return isinstance(UpperCAmelCase__ , torch.device ) def snake_case (UpperCAmelCase__ ) -> List[Any]: return False if not is_torch_available() else _is_torch_device(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Optional[Any]: import torch if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ): UpperCamelCase_: int = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) else: return False return isinstance(UpperCAmelCase__ , torch.dtype ) def snake_case (UpperCAmelCase__ ) -> Tuple: return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Union[str, Any]: import tensorflow as tf return isinstance(UpperCAmelCase__ , tf.Tensor ) def snake_case (UpperCAmelCase__ ) -> Any: return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Tuple: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCAmelCase__ , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(UpperCAmelCase__ ) return type(UpperCAmelCase__ ) == tf.Tensor def snake_case (UpperCAmelCase__ ) -> int: return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> List[str]: import jax.numpy as jnp # noqa: F811 
return isinstance(UpperCAmelCase__ , jnp.ndarray ) def snake_case (UpperCAmelCase__ ) -> Union[str, Any]: return False if not is_flax_available() else _is_jax(UpperCAmelCase__ ) def snake_case (UpperCAmelCase__ ) -> Any: if isinstance(UpperCAmelCase__ , (dict, UserDict) ): return {k: to_py_obj(UpperCAmelCase__ ) for k, v in obj.items()} elif isinstance(UpperCAmelCase__ , (list, tuple) ): return [to_py_obj(UpperCAmelCase__ ) for o in obj] elif is_tf_tensor(UpperCAmelCase__ ): return obj.numpy().tolist() elif is_torch_tensor(UpperCAmelCase__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCAmelCase__ ): return np.asarray(UpperCAmelCase__ ).tolist() elif isinstance(UpperCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def snake_case (UpperCAmelCase__ ) -> List[Any]: if isinstance(UpperCAmelCase__ , (dict, UserDict) ): return {k: to_numpy(UpperCAmelCase__ ) for k, v in obj.items()} elif isinstance(UpperCAmelCase__ , (list, tuple) ): return np.array(UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): return obj.numpy() elif is_torch_tensor(UpperCAmelCase__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCAmelCase__ ): return np.asarray(UpperCAmelCase__ ) else: return obj class _lowerCAmelCase( _a ): """simple docstring""" def _a ( self ): UpperCamelCase_: Any = fields(self ) # Safety and consistency checks if not len(lowerCAmelCase_ ): raise ValueError(f'''{self.__class__.__name__} has no fields.''' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' ) UpperCamelCase_: Tuple = getattr(self , class_fields[0].name ) UpperCamelCase_: Optional[int] = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(lowerCAmelCase_ ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCamelCase_: List[Any] = first_field.items() UpperCamelCase_: Dict = True else: try: UpperCamelCase_: List[Any] = iter(lowerCAmelCase_ ) UpperCamelCase_: Tuple = True except TypeError: UpperCamelCase_: int = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(lowerCAmelCase_ ): if ( not isinstance(lowerCAmelCase_ , (list, tuple) ) or not len(lowerCAmelCase_ ) == 2 or not isinstance(element[0] , lowerCAmelCase_ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCamelCase_: Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f'''Cannot set key/value for {element}. 
It needs to be a tuple (key, value).''' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCamelCase_: Optional[int] = element[1] elif first_field is not None: UpperCamelCase_: Optional[Any] = first_field else: for field in class_fields: UpperCamelCase_: Any = getattr(self , field.name ) if v is not None: UpperCamelCase_: Any = v def __delitem__( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' ) def _a ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' ) def _a ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' ) def _a ( self , *_lowerCamelCase , **_lowerCamelCase ): raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' ) def __getitem__( self , _lowerCamelCase ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCamelCase_: Optional[Any] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self , _lowerCamelCase , _lowerCamelCase ): if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ ) super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ ) def __setitem__( self , _lowerCamelCase , _lowerCamelCase ): super().__setitem__(lowerCAmelCase_ , lowerCAmelCase_ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( self ): return tuple(self[k] for k in self.keys() ) class _lowerCAmelCase( _a , _a ): """simple docstring""" @classmethod def _a ( cls , _lowerCamelCase ): raise ValueError( f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' ) class _lowerCAmelCase( _a ): """simple docstring""" a : Tuple ='''longest''' a : Optional[Any] ='''max_length''' a : Union[str, Any] ='''do_not_pad''' class _lowerCAmelCase( _a ): """simple docstring""" a : Any ='''pt''' a : Dict ='''tf''' a : Optional[int] ='''np''' a : Dict ='''jax''' class _lowerCAmelCase: """simple docstring""" def __init__( self , _lowerCamelCase ): UpperCamelCase_: Optional[int] = context_managers UpperCamelCase_: Optional[Any] = ExitStack() def __enter__( self ): for context_manager in self.context_managers: self.stack.enter_context(lowerCAmelCase_ ) def __exit__( self , *_lowerCamelCase , **_lowerCamelCase ): self.stack.__exit__(*lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case (UpperCAmelCase__ ) -> Union[str, Any]: UpperCamelCase_: str = infer_framework(UpperCAmelCase__ ) if framework == "tf": UpperCamelCase_: Union[str, Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCamelCase_: int = inspect.signature(model_class.forward ) # PyTorch models else: UpperCamelCase_: int = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def snake_case (UpperCAmelCase__ ) -> Optional[int]: UpperCamelCase_: Optional[int] = model_class.__name__ UpperCamelCase_: Any = infer_framework(UpperCAmelCase__ ) if framework == "tf": UpperCamelCase_: List[str] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCamelCase_: Optional[Any] = 
inspect.signature(model_class.forward ) # PyTorch models else: UpperCamelCase_: Optional[Any] = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = "" , UpperCAmelCase__ = "." ) -> Optional[Any]: def _flatten_dict(UpperCAmelCase__ , UpperCAmelCase__="" , UpperCAmelCase__="." ): for k, v in d.items(): UpperCamelCase_: Dict = str(UpperCAmelCase__ ) + delimiter + str(UpperCAmelCase__ ) if parent_key else k if v and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): yield from flatten_dict(UpperCAmelCase__ , UpperCAmelCase__ , delimiter=UpperCAmelCase__ ).items() else: yield key, v return dict(_flatten_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) ) @contextmanager def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = False ) -> Union[str, Any]: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def snake_case (UpperCAmelCase__ , UpperCAmelCase__=None ) -> List[str]: if is_numpy_array(UpperCAmelCase__ ): return np.transpose(UpperCAmelCase__ , axes=UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.T if axes is None else array.permute(*UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.transpose(UpperCAmelCase__ , perm=UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.transpose(UpperCAmelCase__ , axes=UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for transpose: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]: if is_numpy_array(UpperCAmelCase__ ): return np.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.reshape(*UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.reshape(UpperCAmelCase__ , UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for reshape: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__=None ) -> Any: if is_numpy_array(UpperCAmelCase__ ): return np.squeeze(UpperCAmelCase__ , axis=UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.squeeze(UpperCAmelCase__ , axis=UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.squeeze(UpperCAmelCase__ , axis=UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for squeeze: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: if is_numpy_array(UpperCAmelCase__ ): return np.expand_dims(UpperCAmelCase__ , UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.unsqueeze(dim=UpperCAmelCase__ ) elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.expand_dims(UpperCAmelCase__ , axis=UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return jnp.expand_dims(UpperCAmelCase__ , axis=UpperCAmelCase__ ) else: raise ValueError(F'''Type not supported for expand_dims: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ ) -> Dict: if is_numpy_array(UpperCAmelCase__ ): 
return np.size(UpperCAmelCase__ ) elif is_torch_tensor(UpperCAmelCase__ ): return array.numel() elif is_tf_tensor(UpperCAmelCase__ ): import tensorflow as tf return tf.size(UpperCAmelCase__ ) elif is_jax_tensor(UpperCAmelCase__ ): return array.size else: raise ValueError(F'''Type not supported for expand_dims: {type(UpperCAmelCase__ )}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]: for key, value in auto_map.items(): if isinstance(UpperCAmelCase__ , (tuple, list) ): UpperCamelCase_: Optional[int] = [F'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCamelCase_: Tuple = F'''{repo_id}--{value}''' return auto_map def snake_case (UpperCAmelCase__ ) -> str: for base_class in inspect.getmro(UpperCAmelCase__ ): UpperCamelCase_: str = base_class.__module__ UpperCamelCase_: List[str] = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'''Could not infer framework from class {model_class}.''' )
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers, via the recurrence
    gamma(n) = (n - 1) * gamma(n - 1) with gamma(1) = 1 and gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
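Because the function reduces to a factorial at positive integers, an exact spot check is possible (a minimal sketch, assuming gamma from above):

# gamma(n) == (n - 1)! for positive integers, so this holds exactly in floats.
assert gamma(5) == 4 * 3 * 2 * 1.0  # 24.0
# Half-integers chain down to gamma(0.5) == sqrt(pi).
print(gamma(2.5))  # 1.5 * 0.5 * sqrt(pi) ~ 1.3293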
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency __A = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } __A = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' __A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" lowerCAmelCase__ :Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __A (_SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" return x[0] def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :List[str] = get_letter_count(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Tuple = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :int = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[Any] = ''.join(freq_to_letter[freq] ) lowerCAmelCase__ :str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=_SCREAMING_SNAKE_CASE , reverse=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[Any] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :List[Any] = get_frequency_order(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the geodesic distance between two points on Earth using Lambert's formula."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
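A minimal call sketch with illustrative coordinates (it assumes the relative haversine_distance import resolves, so it will not run standalone):

# Hypothetical usage: distance between San Francisco and New York City.
# Output is in metres; the true geodesic is roughly 4.1e6 m.
sf = (37.774856, -122.424227)
nyc = (40.713019, -74.012647)
print(lamberts_ellipsoidal_distance(sf[0], sf[1], nyc[0], nyc[1]))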
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' _snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' _snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def snake_case_ (UpperCamelCase : Tuple ): '''simple docstring''' def remove_articles(UpperCamelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase ) def white_space_fix(UpperCamelCase : Union[str, Any] ): return " ".join(text.split() ) def remove_punc(UpperCamelCase : str ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCamelCase : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) ) def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) ) def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' _a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )] return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100 def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(UpperCamelCase ) _a = Counter(UpperCamelCase ) _a = Counter() for sgram, scount in 
sgramcounter.items(): _a = scount * numref _a = Counter(UpperCamelCase ) _a = Counter() for cgram, ccount in cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = keeptmpscorea / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(UpperCamelCase ) > 0: _a = deltmpscorea / len(UpperCamelCase ) # ADDITION _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = set(UpperCamelCase ) & set(UpperCamelCase ) _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' _a = len(UpperCamelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ): '''simple docstring''' if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase ) else: 
_a = sentence if not return_str: _a = normalized_sent.split() return normalized_sent def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ): sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] ) _a = sari_score / len(UpperCamelCase ) return 100 * sari_score def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ): '''simple docstring''' _a = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(UpperCamelCase )] _a = sacrebleu.corpus_bleu( UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict: """simple docstring""" _a = {} result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) return result
22
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig __snake_case :Dict =logging.get_logger(__name__) __snake_case :int ={ 'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json', # See all DPT models at https://huggingface.co/models?filter=dpt } class lowerCAmelCase__ ( _a ): A_ : Any = 'dpt' def __init__( self : List[Any] , __UpperCamelCase : int=768 , __UpperCamelCase : str=12 , __UpperCamelCase : Any=12 , __UpperCamelCase : List[Any]=3_072 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : Any=1e-12 , __UpperCamelCase : List[Any]=384 , __UpperCamelCase : Optional[Any]=16 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=False , __UpperCamelCase : str=True , __UpperCamelCase : Dict=[2, 5, 8, 11] , __UpperCamelCase : Optional[Any]="project" , __UpperCamelCase : int=[4, 2, 1, 0.5] , __UpperCamelCase : Optional[Any]=[96, 192, 384, 768] , __UpperCamelCase : List[Any]=256 , __UpperCamelCase : Optional[int]=-1 , __UpperCamelCase : int=False , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Any=0.4 , __UpperCamelCase : List[str]=255 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Tuple=[1, 1_024, 24, 24] , __UpperCamelCase : Optional[int]=[0, 1] , __UpperCamelCase : int=None , **__UpperCamelCase : Dict , ) -> Union[str, Any]: super().__init__(**lowerCAmelCase_ ) A = hidden_size A = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } A = BitConfig(**lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): logger.info('Initializing the config with a `BiT` backbone.' ) A = BitConfig(**lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): A = backbone_config else: raise ValueError( f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) A = backbone_featmap_shape A = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' ) else: A = None A = None A = [] A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = patch_size A = num_channels A = qkv_bias A = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) A = readout_type A = reassemble_factors A = neck_hidden_sizes A = fusion_hidden_size A = head_in_index A = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) A = use_auxiliary_head A = auxiliary_loss_weight A = semantic_loss_ignore_index A = semantic_classifier_dropout def __UpperCamelCase ( self : Tuple ) -> List[str]: A = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: A = self.backbone_config.to_dict() A = self.__class__.model_type return output
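# Hedged illustration: the (name-obfuscated) class above matches transformers'
# public DPTConfig, which the 'dpt' model_type and the BiT-hybrid branch suggest.
from transformers import DPTConfig

plain = DPTConfig(readout_type="project")  # plain ViT-backbone path
hybrid = DPTConfig(is_hybrid=True)         # falls back to the default BiT backbone
print(plain.is_hybrid, type(hybrid.backbone_config).__name__)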
106
'''Helpers for converting model-output tensors and numpy arrays to PIL images.'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    '''Convert a batch of torch tensors in [-1, 1] to a list of PIL images.'''
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    '''Convert a numpy image (or batch of images) in [0, 1] to PIL images.'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
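# Usage sketch for the helpers above on random placeholder data; pt_to_pil and
# PIL_INTERPOLATION are names restored on the assumption that this row mirrors
# the public diffusers pil_utils module.
import torch

fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # model-style output in [-1, 1]
pil_images = pt_to_pil(fake_batch)
print(len(pil_images), pil_images[0].size)     # -> 2 (64, 64)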
22
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCamelCase ( _a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : int =GPTaTokenizer UpperCAmelCase_ : Optional[int] =GPTaTokenizerFast UpperCAmelCase_ : Dict =True UpperCAmelCase_ : Optional[int] ={"add_prefix_space": True} UpperCAmelCase_ : List[str] =False def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case : str = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] __snake_case : List[Any] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) __snake_case : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __snake_case : Optional[Any] = {"unk_token": "<unk>"} __snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase_ ) ) def UpperCAmelCase ( self , **UpperCAmelCase ) -> List[str]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def UpperCAmelCase ( self , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = "lower newer" __snake_case : Optional[Any] = "lower newer" return input_text, output_text def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Optional[Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __snake_case : Optional[Any] = "lower newer" __snake_case : Dict = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] __snake_case : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) __snake_case : List[str] = tokens + [tokenizer.unk_token] __snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' if not self.test_rust_tokenizer: return __snake_case : List[str] = self.get_tokenizer() __snake_case : List[str] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ ) __snake_case : Optional[int] = "lower newer" # Testing tokenization __snake_case : Dict = tokenizer.tokenize(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) __snake_case : Dict = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing conversion to ids without special tokens __snake_case : Optional[Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , 
add_prefix_space=lowerCAmelCase_ ) __snake_case : Dict = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing conversion to ids with special tokens __snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase_ ) __snake_case : Any = tokenizer.encode(lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) __snake_case : Dict = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing the unknown token __snake_case : Optional[Any] = tokens + [rust_tokenizer.unk_token] __snake_case : Any = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' pass def UpperCAmelCase ( self , UpperCAmelCase=15 ) -> Any: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __snake_case : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) # Simple input __snake_case : Union[str, Any] = "This is a simple input" __snake_case : Dict = ["This is a simple input 1", "This is a simple input 2"] __snake_case : Tuple = ("This is a simple input", "This is a pair") __snake_case : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" ) # Simple input self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" ) # Simple input self.assertRaises( lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" , ) # Pair input self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" ) # Pair input self.assertRaises(lowerCAmelCase_ , tokenizer_r.encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" ) # Pair input self.assertRaises( lowerCAmelCase_ , tokenizer_r.batch_encode_plus , lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="max_length" , ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : str = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input __snake_case : Optional[Any] = "This is a simple input" __snake_case : List[Any] = ["This is a simple input looooooooong", "This is a simple input"] __snake_case : Union[str, Any] = ("This is a simple input", "This is a pair") __snake_case : Union[str, Any] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] __snake_case : int = tokenizer.pad_token_id __snake_case : Optional[Any] = tokenizer(lowerCAmelCase_ , padding="max_length" , max_length=30 , return_tensors="np" ) __snake_case : Dict = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="np" ) __snake_case : Optional[Any] = tokenizer(*lowerCAmelCase_ , padding="max_length" , max_length=60 , return_tensors="np" ) __snake_case : Any = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncate=lowerCAmelCase_ , return_tensors="np" ) 
# s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Tuple = "$$$" __snake_case : str = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ ) __snake_case : str = "This is a simple input" __snake_case : str = ["This is a simple input 1", "This is a simple input 2"] __snake_case : Union[str, Any] = tokenizer.bos_token_id __snake_case : Tuple = tokenizer(lowerCAmelCase_ ) __snake_case : List[Any] = tokenizer(lowerCAmelCase_ ) self.assertEqual(out_s.input_ids[0] , lowerCAmelCase_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __snake_case : List[str] = tokenizer.decode(out_s.input_ids ) __snake_case : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , lowerCAmelCase_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' pass def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : int = [self.get_tokenizer(do_lower_case=lowerCAmelCase_ , add_bos_token=lowerCAmelCase_ )] for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __snake_case : Union[str, Any] = "Encode this." __snake_case : Tuple = "This one too please." 
__snake_case : str = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) encoded_sequence += tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) __snake_case : str = tokenizer.encode_plus( lowerCAmelCase_ , lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , ) __snake_case : Any = encoded_sequence_dict["input_ids"] __snake_case : Optional[Any] = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) __snake_case : List[Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCAmelCase_ ) ] __snake_case : Any = [x for x in filtered_sequence if x is not None] self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase_ ) __snake_case : Any = "A photo of a cat" __snake_case : Optional[Any] = tokenizer.encode( lowerCAmelCase_ , ) self.assertEqual(lowerCAmelCase_ , [2, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("test_opt" ) __snake_case : str = AutoTokenizer.from_pretrained("./test_opt" ) __snake_case : Tuple = tokenizer.encode( lowerCAmelCase_ , ) self.assertEqual(lowerCAmelCase_ , [2, 250, 1345, 9, 10, 4758] ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=lowerCAmelCase_ ) __snake_case : int = "A photo of a cat" __snake_case : Union[str, Any] = tokenizer.encode( lowerCAmelCase_ , ) # Same as above self.assertEqual(lowerCAmelCase_ , [2, 250, 1345, 9, 10, 4758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=lowerCAmelCase_ ) __snake_case : Optional[Any] = "bos" __snake_case : List[Any] = tokenizer.get_vocab()["bos"] __snake_case : Any = "A photo of a cat" __snake_case : Optional[Any] = tokenizer.encode( lowerCAmelCase_ , ) # We changed the bos token self.assertEqual(lowerCAmelCase_ , [31957, 250, 1345, 9, 10, 4758] ) tokenizer.save_pretrained("./tok" ) __snake_case : List[str] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) __snake_case : Optional[Any] = tokenizer.encode( lowerCAmelCase_ , ) self.assertEqual(lowerCAmelCase_ , [31957, 250, 1345, 9, 10, 4758] )
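# Hedged illustration of the add_prefix_space behaviour the tests above
# exercise, using the public "gpt2" checkpoint (a download is assumed to be
# possible) instead of the tiny temp-dir vocab.
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("gpt2")
print(tok.tokenize("lower newer", add_prefix_space=True))
# the leading '\u0120' ('Ġ') marks tokens that begin a new word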
243
'''Send a message to a Slack channel through an incoming webhook.'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
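# Hedged test sketch that exercises the error branch without hitting Slack; it
# assumes the third-party `responses` mocking library is installed, and the
# webhook URL is a placeholder.
import responses


@responses.activate
def test_error_branch():
    url = "https://hooks.slack.com/services/T000/B000/XXXX"
    responses.add(responses.POST, url, body="invalid_payload", status=400)
    try:
        send_slack_message("hello", url)
    except ValueError as err:
        print(err)


test_error_branch()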
22
0
'''Batch gradient descent for a linear hypothesis function.'''
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis value and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Dot product of the features with the weights, plus the bias term."""
    hyp_val = 0
    # weights start at parameter_vector[1]; parameter_vector[0] is the bias
    for i in range(len(data_input_tuple)):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of the cost-derivative term over the first `end` training examples.

    index == -1 selects the bias parameter.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
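# Equivalent vectorized update as a cross-check on the loop above; the smaller
# learning rate is an assumption chosen for numerical stability, not a value
# taken from the snippet.
import numpy as np

X = np.array([x for x, _ in train_data], dtype=float)
y = np.array([t for _, t in train_data], dtype=float)
Xb = np.hstack([np.ones((len(X), 1)), X])  # prepend a bias column

theta = np.zeros(Xb.shape[1])
for _ in range(200_000):
    step = 0.002 * Xb.T @ (Xb @ theta - y) / len(y)
    if np.allclose(step, 0, atol=2e-6):
        break
    theta -= step
print(theta)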
378
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _snake_case : Tuple = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''shortest_edge''': 2_56} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any: """simple docstring""" _a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase_ ): _a = target_sizes.numpy() _a = [] for idx in range(len(lowerCAmelCase_ ) ): _a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ ) _a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: _a = logits.argmax(dim=1 ) _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
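# Self-contained sketch of the center-crop -> rescale -> normalize steps the
# processor above chains together; the 0.5 mean/std follow
# IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD, and the image is random data.
import numpy as np


def center_crop_hw(img: np.ndarray, height: int, width: int) -> np.ndarray:
    top = (img.shape[0] - height) // 2
    left = (img.shape[1] - width) // 2
    return img[top : top + height, left : left + width]


image = np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8)    # HWC uint8
pixels = center_crop_hw(image, 224, 224).astype(np.float32) / 255   # rescale
pixels = (pixels - 0.5) / 0.5                                       # normalize
print(pixels.shape, float(pixels.min()), float(pixels.max()))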
22
0
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase (_snake_case ,_snake_case=False ) -> List[Any]: '''simple docstring''' try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(_snake_case ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value _A = parse_flag_from_env("RUN_SLOW", default=False) _A = parse_flag_from_env("RUN_REMOTE", default=False) _A = parse_flag_from_env("RUN_LOCAL", default=True) _A = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression _A = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") _A = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") _A = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio _A = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ", ) # Beam _A = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility _A = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows _A = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def lowercase (_snake_case ) -> Tuple: '''simple docstring''' try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip("test requires faiss" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Union[str, Any]: '''simple docstring''' try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip("test requires regex" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Optional[Any]: '''simple docstring''' try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip("test requires elasticsearch" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Optional[Any]: '''simple docstring''' try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip("test requires sqlalchemy" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Dict: '''simple docstring''' if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip("test requires PyTorch" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Optional[Any]: '''simple docstring''' if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip("test requires TensorFlow" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Optional[Any]: '''simple docstring''' if not config.JAX_AVAILABLE: __UpperCamelCase = 
unittest.skip("test requires JAX" )(_snake_case ) return test_case def lowercase (_snake_case ) -> int: '''simple docstring''' if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip("test requires Pillow" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Any: '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers" )(_snake_case ) else: return test_case def lowercase (_snake_case ) -> int: '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken" )(_snake_case ) else: return test_case def lowercase (_snake_case ) -> Union[str, Any]: '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy" )(_snake_case ) else: return test_case def lowercase (_snake_case ) -> Tuple: '''simple docstring''' def _require_spacy_model(_snake_case ): try: import spacy # noqa F401 spacy.load(_snake_case ) except ImportError: return unittest.skip("test requires spacy" )(_snake_case ) except OSError: return unittest.skip("test requires spacy model \'{}\'".format(_snake_case ) )(_snake_case ) else: return test_case return _require_spacy_model def lowercase (_snake_case ) -> Dict: '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark" )(_snake_case ) else: return test_case def lowercase (_snake_case ) -> List[str]: '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark" )(_snake_case ) else: return test_case def lowercase (_snake_case ) -> List[str]: '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip("test is slow" )(_snake_case ) return test_case def lowercase (_snake_case ) -> int: '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip("test is local" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Union[str, Any]: '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip("test is packaged" )(_snake_case ) return test_case def lowercase (_snake_case ) -> Optional[int]: '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip("test requires remote" )(_snake_case ) return test_case def lowercase (*_snake_case ) -> str: '''simple docstring''' def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(_snake_case ) and name.startswith("test" ): for decorator in decorators: __UpperCamelCase = decorator(_snake_case ) setattr(cls ,_snake_case ,_snake_case ) return cls return decorate class __UpperCAmelCase ( _a ): """simple docstring""" pass class __UpperCAmelCase ( _a ): """simple docstring""" _snake_case : str = 0 _snake_case : str = 1 _snake_case : Optional[int] = 2 @contextmanager def lowercase (_snake_case=OfflineSimulationMode.CONNECTION_FAILS ,_snake_case=1e-16 ) -> Dict: '''simple docstring''' __UpperCamelCase = requests.Session().request def timeout_request(_snake_case ,_snake_case ,_snake_case ,**_snake_case ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = "https://10.255.255.1" if kwargs.get("timeout" ) is None: raise RequestWouldHangIndefinitelyError( f"""Tried a call to {url} in offline mode with no timeout set. 
Please set a timeout.""" ) __UpperCamelCase = timeout try: return online_request(_snake_case ,_snake_case ,**_snake_case ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace("10.255.255.1" ,f"""OfflineMock[{url}]""" ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(_snake_case ,_snake_case ,**_snake_case ): raise requests.ConnectionError("Offline mode is enabled." ,request=_snake_case ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" ,_snake_case ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" ,_snake_case ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" ,_snake_case ): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum." ) @contextmanager def lowercase (*_snake_case ,**_snake_case ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_snake_case ,**_snake_case ) as tmp_dir: try: os.chdir(_snake_case ) yield finally: os.chdir(_snake_case ) @contextmanager def lowercase () -> List[str]: '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase () -> Optional[Any]: '''simple docstring''' import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase (_snake_case ,_snake_case ) -> Tuple: '''simple docstring''' return deepcopy(_snake_case ).integers(0 ,100 ,10 ).tolist() == deepcopy(_snake_case ).integers(0 ,100 ,10 ).tolist() def lowercase (_snake_case ) -> Optional[int]: '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(_snake_case ,*_snake_case ,**_snake_case ): try: return func(*_snake_case ,**_snake_case ) except HTTPError as err: if str(_snake_case ).startswith("500" ) or str(_snake_case ).startswith("502" ): pytest.xfail(str(_snake_case ) ) raise err return decorator.decorator(_wrapper ,_snake_case ) class __UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , A_ : str , A_ : Union[str, Any] , A_ : List[Any] )-> Dict: __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def lowercase (_snake_case ,_snake_case ) -> List[str]: '''simple docstring''' while True: __UpperCamelCase = await stream.readline() if line: callback(_snake_case ) else: break async def lowercase (_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=False ,_snake_case=False ) -> str: '''simple docstring''' if echo: print("\nRunning: " ," ".join(_snake_case ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=_snake_case ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_snake_case ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(_snake_case ,_snake_case ,_snake_case ,_snake_case="" ): __UpperCamelCase = line.decode("utf-8" ).rstrip() sink.append(_snake_case ) if not quiet: print(_snake_case ,_snake_case ,file=_snake_case ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout ,lambda _snake_case : tee(_snake_case ,_snake_case ,sys.stdout ,label="stdout:" ) ), _read_stream(p.stderr ,lambda _snake_case : tee(_snake_case ,_snake_case ,sys.stderr ,label="stderr:" ) ), ] ,timeout=_snake_case ,) return _RunOutput(await p.wait() ,_snake_case ,_snake_case ) def lowercase (_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=180 ,_snake_case=False ,_snake_case=True ) -> List[str]: '''simple docstring''' __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(_snake_case ,env=_snake_case ,stdin=_snake_case ,timeout=_snake_case ,quiet=_snake_case ,echo=_snake_case ) ) __UpperCamelCase = " ".join(_snake_case ) if result.returncode > 0: __UpperCamelCase = "\n".join(result.stderr ) raise RuntimeError( f"""\'{cmd_str}\' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"""\'{cmd_str}\' produced no output.""" ) return result def lowercase () -> List[str]: '''simple docstring''' __UpperCamelCase = os.environ.get("PYTEST_XDIST_WORKER" ,"gw0" ) __UpperCamelCase = re.sub(r"^gw" ,"" ,_snake_case ,0 ,re.M ) return int(_snake_case ) def lowercase () -> Any: '''simple docstring''' __UpperCamelCase = 29500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
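# Minimal sketch of the require_* skip-decorator pattern this module repeats
# for faiss, torch, spacy, etc.; require_numpy is a made-up name used only for
# illustration.
import unittest


def require_numpy(test_case):
    try:
        import numpy  # noqa: F401
    except ImportError:
        return unittest.skip("test requires numpy")(test_case)
    return test_case


@require_numpy
class ExampleTest(unittest.TestCase):
    def test_mean(self):
        import numpy as np

        self.assertEqual(np.mean([1, 2, 3]), 2.0)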
505
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ): '''simple docstring''' _a = {} if train_file is not None: _a = [train_file] if eval_file is not None: _a = [eval_file] if test_file is not None: _a = [test_file] _a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase ) _a = list(ds[list(files.keys() )[0]].features.keys() ) _a = features_name.pop(UpperCamelCase ) _a = list(set(ds[list(files.keys() )[0]][label_name] ) ) _a = {label: i for i, label in enumerate(UpperCamelCase )} _a = tokenizer.model_input_names _a = {} if len(UpperCamelCase ) == 1: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , ) elif len(UpperCamelCase ) == 2: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid _snake_case : str = logging.getLogger(__name__) @dataclass class A : lowercase_ = field(metadata={'help': 'Which column contains the label'} ) lowercase_ = field(default=_a 
,metadata={'help': 'The path of the training file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} ) lowercase_ = field( default=128 ,metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } ,) lowercase_ = field( default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A : lowercase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowercase_ = field( default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,) def snake_case_ (): '''simple docstring''' _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _a , _a , _a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _a , _a , _a , _a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict: _a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _a = TFTrainer( model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _a = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _a = trainer.evaluate() _a = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(UpperCamelCase ) return results if __name__ == "__main__": main()
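# Hedged sketch of the HfArgumentParser flow that main() starts with;
# ToyArguments is a made-up dataclass standing in for the ModelArguments /
# DataTrainingArguments dataclasses above.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ToyArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    max_seq_length: int = field(default=128)


parser = HfArgumentParser(ToyArguments)
(toy_args,) = parser.parse_args_into_dataclasses(args=["--label_column_id", "0"])
print(toy_args)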
22
0
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def UpperCamelCase_( snake_case__: List[Any] , snake_case__: str ) -> int: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer UpperCAmelCase__ = flax_key_tuple[:-1] + ('weight',) UpperCAmelCase__ = torch.permute(snake_case__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ): # linear layer UpperCAmelCase__ = flax_key_tuple[:-1] + ('weight',) UpperCAmelCase__ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: UpperCAmelCase__ = flax_key_tuple[:-1] + ('weight',) return flax_key_tuple, flax_tensor def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Optional[int] , snake_case__: List[Any] ) -> Tuple: if "metadata" in layer: UpperCAmelCase__ = layer.split('metadata' ) UpperCAmelCase__ = ''.join(split_layer[0] )[:-1] UpperCAmelCase__ = [tuple(('metadata' + split_layer[1]).split('/' ) )] elif "kvstore" in layer: UpperCAmelCase__ = layer.split('kvstore' ) UpperCAmelCase__ = ''.join(split_layer[0] )[:-1] UpperCAmelCase__ = [tuple(('kvstore' + split_layer[1]).split('/' ) )] else: UpperCAmelCase__ = layer.split('/' ) UpperCAmelCase__ = '/'.join(split_layer[:-1] ) UpperCAmelCase__ = (split_layer[-1],) if "kvstore/path" in layer: UpperCAmelCase__ = f"{switch_checkpoint_path}/{checkpoint_info[layer]}" elif "kvstore/driver" in layer: UpperCAmelCase__ = 'file' else: UpperCAmelCase__ = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def UpperCamelCase_( snake_case__: Dict , snake_case__: Optional[Any] ) -> int: UpperCAmelCase__ = rename_keys(snake_case__ ) UpperCAmelCase__ = {} for k, v in current_block.items(): UpperCAmelCase__ = v UpperCAmelCase__ = new_current_block torch.save(snake_case__ , snake_case__ ) def UpperCamelCase_( snake_case__: Dict , snake_case__: Any , snake_case__: List[str] , snake_case__: Tuple , snake_case__: str = WEIGHTS_NAME ) -> Optional[Any]: UpperCAmelCase__ = convert_file_size_to_int(snake_case__ ) UpperCAmelCase__ = [] UpperCAmelCase__ = {} UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 os.makedirs(snake_case__ , exist_ok=snake_case__ ) with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp: UpperCAmelCase__ = serialization.msgpack_restore(fp.read() )['optimizer']['target'] UpperCAmelCase__ = flatten_dict(snake_case__ , sep='/' ) UpperCAmelCase__ = {} for layer in checkpoint_info.keys(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = get_key_and_tensorstore_dict( snake_case__ , snake_case__ , snake_case__ ) if curr_real_layer_name in all_layers: UpperCAmelCase__ = content else: UpperCAmelCase__ = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file UpperCAmelCase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() UpperCAmelCase__ = torch.tensor(snake_case__ ) UpperCAmelCase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts UpperCAmelCase__ , UpperCAmelCase__ = rename_base_flax_keys(tuple(key.split('/' ) ) , snake_case__ ) 
UpperCAmelCase__ = '/'.join(snake_case__ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: UpperCAmelCase__ = os.path.join( snake_case__ , weights_name.replace('.bin' , f"-{len(snake_case__ )+1:05d}-of-???.bin" ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) del current_block UpperCAmelCase__ = {} UpperCAmelCase__ = 0 UpperCAmelCase__ = raw_weights.to(getattr(snake_case__ , snake_case__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block UpperCAmelCase__ = os.path.join(snake_case__ , weights_name.replace('.bin' , f"-{len(snake_case__ )+1:05d}-of-???.bin" ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(snake_case__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index UpperCAmelCase__ = {} UpperCAmelCase__ = {} for idx, shard in enumerate(snake_case__ ): UpperCAmelCase__ = weights_name.replace( '.bin' , f"-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin" ) # len(sharded_state_dicts):05d} UpperCAmelCase__ = os.path.join(snake_case__ , weights_name.replace('.bin' , f"-{idx+1:05d}-of-???.bin" ) ) os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) UpperCAmelCase__ = shard for key in shard: UpperCAmelCase__ = shard_file # Add the metadata UpperCAmelCase__ = {'total_size': total_size} UpperCAmelCase__ = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' ) as f: UpperCAmelCase__ = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '\n' f.write(snake_case__ ) return metadata, index if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''') parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''', type=str, required=False, help='''Path to the output pytorch model.''', ) _UpperCamelCase = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def UpperCamelCase_( ) -> str: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer UpperCAmelCase__ = SwitchTransformersConfig.from_pretrained('google/switch-base-8' ) config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' ) UpperCAmelCase__ = SwitchTransformersForConditionalGeneration.from_pretrained( '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' ) UpperCAmelCase__ = TaTokenizer.from_pretrained('t5-small' ) UpperCAmelCase__ = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.' 
UpperCAmelCase__ = tokenizer(snake_case__ , return_tensors='pt' ).input_ids UpperCAmelCase__ = model.generate(snake_case__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
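# Self-contained sketch of the greedy size-based sharding rule used above:
# start a new shard whenever adding the next tensor would overflow
# max_shard_size (the sizes here are made-up byte counts).
def plan_shards(sizes: dict, max_shard_size: int) -> list:
    shards, current, current_size = [], [], 0
    for name, size in sizes.items():
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)
    return shards


print(plan_shards({"a": 6, "b": 5, "c": 4, "d": 2}, max_shard_size=10))
# -> [['a'], ['b', 'c'], ['d']]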
146
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( _a ,unittest.TestCase ): lowercase_ = LEDTokenizer lowercase_ = LEDTokenizerFast lowercase_ = True def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" super().setUp() _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def __lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , lowerCAmelCase_ ) self.assertIn('''attention_mask''' , lowerCAmelCase_ ) self.assertNotIn('''labels''' , lowerCAmelCase_ ) 
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _a = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = ['''A long paragraph for summarization.'''] _a = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' ) _a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' ) _a = inputs['''input_ids'''] _a = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = ['''Summary of the text.''', '''Another summary.'''] _a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']] _a = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = '''A, <mask> AllenNLP sentence.''' _a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
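# --- usage sketch (ours; a minimal run of the tokenizer the tests above
# exercise, outside the test harness — requires network access to fetch
# the checkpoint):
if __name__ == "__main__":
    from transformers import LEDTokenizerFast

    tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    batch = tok(["A long paragraph for summarization."], return_tensors="pt")
    print(batch.input_ids.shape, batch.attention_mask.shape)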
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from the Giphy search API for a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
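# --- example invocation (ours; the script filename and the paths below are
# placeholders, not taken from the original source):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin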
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = StableDiffusionSAGPipeline A_ = TEXT_TO_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_BATCH_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) UpperCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) UpperCamelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) torch.manual_seed(0 ) UpperCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase ) UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCamelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> List[Any]: if str(_UpperCAmelCase ).startswith('mps' ): UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase ) else: UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) UpperCamelCase_ = { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def _UpperCAmelCase ( self ) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' 
UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase_ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase_ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , width=768 , height=512 , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , ) UpperCamelCase_ = output.images assert image.shape == (1, 512, 768, 3)
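# --- usage sketch (ours; the same call pattern as the slow tests above,
# trimmed to one short run — assumes a CUDA device and downloaded weights):
#
#     pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#     image = pipe("a photo of an astronaut", sag_scale=0.75, num_inference_steps=20).images[0]
#     image.save("sag.png")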
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _a ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_MASKED_LM_MAPPING A_ = TF_MODEL_FOR_MASKED_LM_MAPPING def _UpperCAmelCase ( self ) -> List[str]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 
2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() UpperCamelCase_ = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(_UpperCAmelCase ) @slow @require_tf def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.2_5_1, 'token': 2201, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.2_1_4, 'token': 12790, 'token_str': ' Lyon', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) @require_tf def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = fill_masker.tokenizer UpperCamelCase_ = fill_masker.model UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) 
self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , ) with self.assertRaises(_UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_UpperCAmelCase ): fill_masker('This is' ) self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase ) 
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = sorted(vocab.keys() )[:2] # Pipeline argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Call argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Score equivalence UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs] UpperCamelCase_ = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # top_k=2, ntargets=3 UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.get_vocab() # String duplicates + id duplicates UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]] UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_UpperCAmelCase ) , 3 ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , )
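# --- usage sketch (ours; runs the same tiny checkpoint the tests above use):
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    for candidate in unmasker("My name is <mask>"):
        print(candidate["token_str"], candidate["score"])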
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of `nums` with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
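# --- worked example (ours): the recurrence keeps two running values, the
# best sum that includes the current element and the best sum that excludes
# it; the answer is the max of the two.
if __name__ == "__main__":
    assert maximum_non_adjacent_sum([1, 2, 4, 5]) == 7  # 2 + 5
    assert maximum_non_adjacent_sum([3, 7, 4, 6, 5]) == 13  # 7 + 6
    assert maximum_non_adjacent_sum([]) == 0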
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
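# --- quick demonstration of the print-detection pattern above (ours): the
# alternation consumes prints inside comments, strings and docstrings first,
# so only a bare `print(` call ends up in capture group 1.
if __name__ == "__main__":
    sample = '# print(ignored)\nprint("caught")'
    pattern = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
    hits = [m for m in pattern.finditer(sample) if m.group(1)]
    assert len(hits) == 1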
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar snake_case__ : List[str] = TypeVar("""T""") def _snake_case (__lowercase): return (position - 1) // 2 def _snake_case (__lowercase): return (2 * position) + 1 def _snake_case (__lowercase): return (2 * position) + 2 class _a ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: UpperCamelCase_ = [] UpperCamelCase_ = {} UpperCamelCase_ = 0 def __len__( self ) -> int: return self.elements def __repr__( self ) -> str: return str(self.heap ) def _UpperCAmelCase ( self ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) UpperCamelCase_ = self.elements self.elements += 1 self._bubble_up(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) UpperCamelCase_ , UpperCamelCase_ = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: UpperCamelCase_ , UpperCamelCase_ = self.heap[0] self._bubble_down(_UpperCAmelCase ) return elem def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None: # Update the weight of the given key UpperCamelCase_ = self.position_map[elem] UpperCamelCase_ = (elem, weight) if position > 0: UpperCamelCase_ = get_parent_position(_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position] if parent_weight > weight: self._bubble_up(_UpperCAmelCase ) else: self._bubble_down(_UpperCAmelCase ) else: self._bubble_down(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] UpperCamelCase_ = self.position_map[elem] if curr_pos == 0: return None UpperCamelCase_ = get_parent_position(_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos] UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_up(_UpperCAmelCase ) return None def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] UpperCamelCase_ = self.position_map[elem] UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos] UpperCamelCase_ = get_child_left_position(_UpperCAmelCase ) UpperCamelCase_ = get_child_right_position(_UpperCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position] UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_down(_UpperCAmelCase ) if child_left_position < self.elements: UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_down(_UpperCAmelCase ) else: return None if child_right_position < self.elements: UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_down(_UpperCAmelCase ) return None def _UpperCAmelCase ( self , 
_UpperCAmelCase , _UpperCAmelCase ) -> None: # Swap the nodes at the given positions UpperCamelCase_ = self.heap[nodea_pos][0] UpperCamelCase_ = self.heap[nodea_pos][0] UpperCamelCase_ , UpperCamelCase_ = ( self.heap[nodea_pos], self.heap[nodea_pos], ) UpperCamelCase_ = nodea_pos UpperCamelCase_ = nodea_pos class _a ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: UpperCamelCase_ = {} UpperCamelCase_ = 0 def __repr__( self ) -> str: return str(self.connections ) def __len__( self ) -> int: return self.nodes def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: UpperCamelCase_ = {} self.nodes += 1 def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None: # Add an edge between 2 nodes in the graph self.add_node(_UpperCAmelCase ) self.add_node(_UpperCAmelCase ) UpperCamelCase_ = weight UpperCamelCase_ = weight def _snake_case (__lowercase , ): UpperCamelCase_ = {node: maxsize for node in graph.connections} UpperCamelCase_ = {node: None for node in graph.connections} UpperCamelCase_ = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(__lowercase , __lowercase) if priority_queue.is_empty(): return dist, parent # initialization UpperCamelCase_ = priority_queue.extract_min() UpperCamelCase_ = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: UpperCamelCase_ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowercase , dist[neighbour]) UpperCamelCase_ = node # running prim's algorithm while not priority_queue.is_empty(): UpperCamelCase_ = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: UpperCamelCase_ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowercase , dist[neighbour]) UpperCamelCase_ = node return dist, parent
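# --- usage sketch (ours; assumes the identifiers from the upstream version
# this file mirrors: a GraphUndirectedWeighted class and a prims_algo entry
# point — adjust the names to match your copy):
#
#     graph = GraphUndirectedWeighted[str]()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     graph.add_edge("c", "a", 5)
#     dist, parent = prims_algo(graph)
#     print(parent)  # MST as child -> parent links, e.g. {'a': None, 'b': 'a', 'c': 'a'}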
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
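# --- usage sketch (ours): the attribute_map above lets generic code read
# decoder-centric fields through the common names, e.g. with the library
# installed:
#
#     from transformers import TrOCRConfig
#     config = TrOCRConfig(d_model=256, decoder_layers=2)
#     assert config.hidden_size == 256       # resolved via attribute_map to d_model
#     assert config.num_hidden_layers == 2   # resolved to decoder_layers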
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
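# --- worked example (ours): SiLU is x * sigmoid(x), so sigmoid(0) = 0.5
# gives silu(0) = 0, and for large positive x the output approaches x.
if __name__ == "__main__":
    x = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(x))              # ~ [0.269, 0.5, 0.731]
    print(sigmoid_linear_unit(x))  # ~ [-0.269, 0.0, 0.731]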
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 class _a ( UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self , _UpperCAmelCase = 32 , _UpperCAmelCase = 64 , _UpperCAmelCase = 20 , _UpperCAmelCase = 768 , _UpperCAmelCase=77 , _UpperCAmelCase=4 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = "silu" , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "linear" , _UpperCAmelCase = "prd" , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ) -> Any: super().__init__() UpperCamelCase_ = num_attention_heads UpperCamelCase_ = attention_head_dim UpperCamelCase_ = num_attention_heads * attention_head_dim UpperCamelCase_ = additional_embeddings UpperCamelCase_ = time_embed_dim or inner_dim UpperCamelCase_ = embedding_proj_dim or embedding_dim UpperCamelCase_ = clip_embed_dim or embedding_dim UpperCamelCase_ = Timesteps(_UpperCAmelCase , _UpperCAmelCase , 0 ) UpperCamelCase_ = TimestepEmbedding(_UpperCAmelCase , _UpperCAmelCase , out_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase ) UpperCamelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) if embedding_proj_norm_type is None: UpperCamelCase_ = None elif embedding_proj_norm_type == "layer": UpperCamelCase_ = nn.LayerNorm(_UpperCAmelCase ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) UpperCamelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) if encoder_hid_proj_type is None: UpperCamelCase_ = None elif encoder_hid_proj_type == "linear": UpperCamelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) UpperCamelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _UpperCAmelCase ) ) if added_emb_type == "prd": UpperCamelCase_ = nn.Parameter(torch.zeros(1 , 1 , _UpperCAmelCase ) ) elif added_emb_type is None: UpperCamelCase_ = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) UpperCamelCase_ = nn.ModuleList( [ BasicTransformerBlock( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dropout=_UpperCAmelCase , activation_fn='gelu' , attention_bias=_UpperCAmelCase , ) for d in range(_UpperCAmelCase ) ] ) if norm_in_type == "layer": UpperCamelCase_ = nn.LayerNorm(_UpperCAmelCase ) elif norm_in_type is None: UpperCamelCase_ = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) UpperCamelCase_ = nn.LayerNorm(_UpperCAmelCase ) UpperCamelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 ) causal_attention_mask.triu_(1 ) UpperCamelCase_ = causal_attention_mask[None, ...] 
self.register_buffer('causal_attention_mask' , _UpperCAmelCase , persistent=_UpperCAmelCase ) UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _UpperCAmelCase ) ) UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _UpperCAmelCase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]: UpperCamelCase_ = {} def fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if hasattr(_UpperCAmelCase , 'set_processor' ): UpperCamelCase_ = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , _UpperCAmelCase , _UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return processors def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = len(self.attn_processors.keys() ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(_UpperCAmelCase )} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if hasattr(_UpperCAmelCase , 'set_processor' ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): module.set_processor(_UpperCAmelCase ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , _UpperCAmelCase , _UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: self.set_attn_processor(AttnProcessor() ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[str, Any]: UpperCamelCase_ = hidden_states.shape[0] UpperCamelCase_ = timestep if not torch.is_tensor(_UpperCAmelCase ): UpperCamelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0: UpperCamelCase_ = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase_ = timesteps * torch.ones(_UpperCAmelCase , dtype=timesteps.dtype , device=timesteps.device ) UpperCamelCase_ = self.time_proj(_UpperCAmelCase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
UpperCamelCase_ = timesteps_projected.to(dtype=self.dtype ) UpperCamelCase_ = self.time_embedding(_UpperCAmelCase ) if self.embedding_proj_norm is not None: UpperCamelCase_ = self.embedding_proj_norm(_UpperCAmelCase ) UpperCamelCase_ = self.embedding_proj(_UpperCAmelCase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: UpperCamelCase_ = self.encoder_hidden_states_proj(_UpperCAmelCase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' ) UpperCamelCase_ = self.proj_in(_UpperCAmelCase ) UpperCamelCase_ = self.positional_embedding.to(hidden_states.dtype ) UpperCamelCase_ = [] UpperCamelCase_ = 0 if encoder_hidden_states is not None: additional_embeds.append(_UpperCAmelCase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: UpperCamelCase_ = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: UpperCamelCase_ = hidden_states[:, None, :] UpperCamelCase_ = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: UpperCamelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(_UpperCAmelCase , -1 , -1 ) additional_embeds.append(_UpperCAmelCase ) UpperCamelCase_ = torch.cat( _UpperCAmelCase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens UpperCamelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: UpperCamelCase_ = F.pad( _UpperCAmelCase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) UpperCamelCase_ = hidden_states + positional_embeddings if attention_mask is not None: UpperCamelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0 UpperCamelCase_ = F.pad(_UpperCAmelCase , (0, self.additional_embeddings) , value=0.0 ) UpperCamelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) UpperCamelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: UpperCamelCase_ = self.norm_in(_UpperCAmelCase ) for block in self.transformer_blocks: UpperCamelCase_ = block(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) UpperCamelCase_ = self.norm_out(_UpperCAmelCase ) if self.prd_embedding is not None: UpperCamelCase_ = hidden_states[:, -1] else: UpperCamelCase_ = hidden_states[:, additional_embeddings_len:] UpperCamelCase_ = self.proj_to_clip_embeddings(_UpperCAmelCase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
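# --- shape sketch (ours; the constructor and forward arguments mirror the
# public PriorTransformer interface this class corresponds to — treat the
# tiny hyperparameters below as illustrative, not canonical):
#
#     prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4,
#                              num_layers=2, embedding_dim=8, num_embeddings=7,
#                              additional_embeddings=4)
#     out = prior(hidden_states=torch.randn(1, 8), timestep=1,
#                 proj_embedding=torch.randn(1, 8),
#                 encoder_hidden_states=torch.randn(1, 7, 8))
#     out.predicted_image_embedding.shape  # torch.Size([1, 8])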
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year with Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _a ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = inspect.getfile(accelerate.test_utils ) UpperCamelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) UpperCamelCase_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] ) UpperCamelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] ) @require_multi_gpu def _UpperCAmelCase ( self ) -> Dict: print(f"""Found {torch.cuda.device_count()} devices.""" ) UpperCamelCase_ = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> Optional[Any]: print(f"""Found {torch.cuda.device_count()} devices.""" ) UpperCamelCase_ = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(f"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> Any: UpperCamelCase_ = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> List[str]: print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) UpperCamelCase_ = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ): execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": snake_case__ : Dict = Accelerator() snake_case__ : str = (accelerator.state.process_index + 2, 1_0) snake_case__ : int = torch.randint(0, 1_0, shape).to(accelerator.device) snake_case__ : List[Any] = """""" snake_case__ : Any = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." snake_case__ : Optional[int] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." snake_case__ : Optional[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
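# Small variation (not in the original): read the webhook URL from an
# environment variable so it never lands in source control. The variable name
# SLACK_WEBHOOK_URL is an assumption, not something the snippet above defines.
import os


def send_from_env(message_body: str) -> None:
    slack_url = os.environ["SLACK_WEBHOOK_URL"]  # raises KeyError if unset
    send_slack_message(message_body, slack_url)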
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the serialized state disagrees on add_prefix_space
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate all conversation turns, each followed by the EOS token."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
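# Usage sketch (not part of the file above, which is an internal transformers
# module with relative imports): the same tokenizer is normally loaded through
# the public API. Requires network access or a local cache of the checkpoint.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("Hello GPT-NeoX").input_ids  # list[int] of token ids
#   print(tok.decode(ids))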
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find instances where a non-binary file is opened without an explicit utf-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find print statements in a dataset script, ignoring comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return timing statistics."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
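# Example invocation for summarization (mirrors the MT usage note above; the
# model name and paths are placeholders, not values the script assumes):
#
#   python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
#       --reference_path cnn_dm/test.target --score_path rouge.json --bs 16 --fp16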
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Pipeline that runs the same prompt through the first four Stable Diffusion checkpoints for comparison."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                       negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                       output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
                 negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None,
                 output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback,
            callback_steps=callback_steps, **kwargs,
        )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Naive recursive 0/1 knapsack: at each index, either skip the item or take it
    (if it fits) and recurse on the remaining capacity.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
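# A memoized variant (not in the original): caching on (remaining, index) avoids
# recomputing overlapping subproblems, turning the exponential recursion into
# O(number_of_items * max_weight). Sketch only; it assumes weights and values
# stay fixed for the lifetime of the cache.
from functools import lru_cache


def knapsack_memo(weights: list, values: list, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(remaining: int, index: int) -> int:
        if index == len(weights):
            return 0
        skip = best(remaining, index + 1)
        take = 0
        if weights[index] <= remaining:
            take = values[index] + best(remaining - weights[index], index + 1)
        return max(skip, take)

    return best(max_weight, 0)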
def factorial(num: int) -> int:
    """Compute the factorial of num by iterated multiplication."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """
    Return the sum of the digits in num! (Project Euler problem 20).

    >>> solution(100)
    648
    """
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
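# Equivalent one-liner (not in the original) using the standard library's
# math.factorial; handy as a cross-check for solution().
import math


def solution_compact(num: int = 100) -> int:
    return sum(int(digit) for digit in str(math.factorial(num)))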
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
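# Usage sketch (not part of the file above, which is an internal transformers
# module with relative imports): preprocessing a dummy 8-frame video of random
# uint8 frames through the same public image-processor API. The exact output
# shape depends on crop_size; with the defaults it should be (1, 8, 3, 224, 224).
#
#   import numpy as np
#   processor = VideoImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor(video, return_tensors="np")
#   print(batch["pixel_values"].shape)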
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
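# Sketch of the predictor-corrector sampling loop this scheduler supports
# (not part of the file above; `score_model` is an assumed callable returning
# the score estimate for a batch of samples at a given timestep):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   sample = torch.randn(batch_shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = score_model(sample, t)
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = score_model(sample, t)
#       output = scheduler.step_pred(model_output, t, sample)
#       sample = output.prev_sample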
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Find the last index of char in pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Find the index of the mismatched char in text when compared with pattern from the end."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
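# Cross-check (not in the original): a brute-force scan with str.startswith
# should report the same match positions for any text/pattern pair.
def naive_positions(text: str, pattern: str) -> list[int]:
    return [i for i in range(len(text) - len(pattern) + 1) if text.startswith(pattern, i)]


assert naive_positions(text, pattern) == positions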
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
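# Illustrative consistency check (not in the original file): the three
# implementations above must agree for any integer input, including 0 and negatives.
for n in (0, 7, 12345, -9876):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)
print(sum_of_digits(12345))  # 15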
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD).

Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for
when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class _a ( datasets.BeamBasedBuilder ): """simple docstring""" def _UpperCAmelCase ( self ) -> List[str]: return datasets.DatasetInfo( features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=_UpperCAmelCase , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase ) class _a ( datasets.BeamBasedBuilder ): """simple docstring""" def _UpperCAmelCase ( self ) -> Any: return datasets.DatasetInfo( features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=_UpperCAmelCase , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} ) ] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase ) def _snake_case (): return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])] def _snake_case (): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])] class _a ( UpperCAmelCase__ ): """simple docstring""" @require_beam def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _UpperCAmelCase ( self ) -> List[str]: import apache_beam as beam UpperCamelCase_ = beam.io.parquetio.WriteToParquet UpperCamelCase_ = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock: UpperCamelCase_ = partial(_UpperCAmelCase , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertTrue( os.path.exists( os.path.join( _UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) 
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _UpperCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
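# A hedged variant (illustrative, not part of the original script): `requests.post`
# accepts a `timeout=` argument, which makes the call fail fast instead of hanging
# indefinitely on an unreachable webhook.
def send_slack_message_with_timeout(message_body: str, slack_url: str) -> None:
    response = requests.post(
        slack_url,
        json={"text": message_body},
        headers={"Content-Type": "application/json"},
        timeout=10,  # seconds
    )
    response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses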
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging snake_case__ : Optional[int] = logging.get_logger(__name__) def _snake_case (__lowercase , __lowercase , __lowercase): UpperCamelCase_ = nn.ModuleList([src_layers[i] for i in layers_to_copy]) assert len(__lowercase) == len(__lowercase), f"""{len(__lowercase)} != {len(__lowercase)}""" dest_layers.load_state_dict(layers_to_copy.state_dict()) snake_case__ : Tuple = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 1_2: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 1_1], 4: [0, 4, 8, 1_1], 6: [0, 2, 4, 7, 9, 1_1], 9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1], 1_2: list(range(1_2)), }, 1_6: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 1_5], 3: [0, 8, 1_5], 4: [0, 5, 1_0, 1_5], 6: [0, 3, 6, 9, 1_2, 1_5], 8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5], 9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5], 1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5], 1_6: list(range(1_6)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } snake_case__ : Any = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]}, 1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]}, } def _snake_case (__lowercase , __lowercase): try: UpperCamelCase_ = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first""" f""" {n_student}""") return list(range(__lowercase)) def _snake_case (__lowercase , __lowercase): if n_student > n_teacher: raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""") elif n_teacher == n_student: return list(range(__lowercase)) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def _snake_case (__lowercase , __lowercase = "student" , __lowercase = None , __lowercase = None , __lowercase=False , __lowercase=None , __lowercase=None , **__lowercase , ): UpperCamelCase_ = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.' 
assert (e is not None) or (d is not None), _msg if isinstance(__lowercase , __lowercase): AutoTokenizer.from_pretrained(__lowercase).save_pretrained(__lowercase) # purely for convenience UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(__lowercase).eval() else: assert isinstance(__lowercase , __lowercase), f"""teacher must be a model or string got type {type(__lowercase)}""" UpperCamelCase_ = teacher.config.to_diff_dict() try: UpperCamelCase_ , UpperCamelCase_ = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: UpperCamelCase_ = teacher_e if d is None: UpperCamelCase_ = teacher_d init_kwargs.update({'encoder_layers': e, 'decoder_layers': d}) except AttributeError: # T5 if hasattr(teacher.config , 'num_encoder_layers'): UpperCamelCase_ , UpperCamelCase_ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: UpperCamelCase_ , UpperCamelCase_ = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: UpperCamelCase_ = teacher_e if d is None: UpperCamelCase_ = teacher_d if hasattr(teacher.config , 'num_encoder_layers'): init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d}) else: init_kwargs.update({'num_layers': e, 'num_decoder_layers': d}) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(__lowercase) # Copy weights UpperCamelCase_ = teacher.config_class(**__lowercase) UpperCamelCase_ = AutoModelForSeqaSeqLM.from_config(__lowercase) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. UpperCamelCase_ = student.load_state_dict(teacher.state_dict() , strict=__lowercase) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save UpperCamelCase_ , UpperCamelCase_ = list(range(__lowercase)), list(range(__lowercase)) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to""" f""" {save_path}""") student.save_pretrained(__lowercase) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: UpperCamelCase_ = pick_layers_to_copy(__lowercase , __lowercase) if d_layers_to_copy is None: UpperCamelCase_ = pick_layers_to_copy(__lowercase , __lowercase) try: if hasattr( __lowercase , 'prophetnet'): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowercase) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowercase) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowercase) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowercase) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , __lowercase) copy_layers(teacher.decoder.block , student.decoder.block , __lowercase) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. 
Saving them to {save_path}""") UpperCamelCase_ = { 'teacher_type': teacher.config.model_type, 'copied_encoder_layers': e_layers_to_copy, 'copied_decoder_layers': d_layers_to_copy, } student.save_pretrained(__lowercase) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _a ( UpperCAmelCase__ ): """simple docstring""" @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' ) UpperCamelCase_ = bertabert.config.encoder.vocab_size UpperCamelCase_ = tokenizer.sep_token_id UpperCamelCase_ = tokenizer.cls_token_id UpperCamelCase_ = 128 UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) UpperCamelCase_ = train_dataset.select(range(32 ) ) UpperCamelCase_ = val_dataset.select(range(16 ) ) UpperCamelCase_ = 4 def _map_to_encoder_decoder_inputs(_UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 ) UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 ) UpperCamelCase_ = inputs.input_ids UpperCamelCase_ = inputs.attention_mask UpperCamelCase_ = outputs.input_ids UpperCamelCase_ = outputs.input_ids.copy() UpperCamelCase_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] UpperCamelCase_ = outputs.attention_mask assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(_UpperCAmelCase ): UpperCamelCase_ = pred.label_ids UpperCamelCase_ = pred.predictions # all unnecessary tokens are removed UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset UpperCamelCase_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset UpperCamelCase_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) UpperCamelCase_ = self.get_auto_remove_tmp_dir() UpperCamelCase_ = SeqaSeqTrainingArguments( output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer UpperCamelCase_ = SeqaSeqTrainer( model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , 
train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , ) # start training trainer.train()
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging snake_case__ : List[str] = logging.get_logger(__name__) if is_vision_available(): import PIL class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = ["""pixel_values"""] def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ) -> None: super().__init__(**_UpperCAmelCase ) UpperCamelCase_ = size if size is not None else {'shortest_edge': 224} UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name='crop_size' ) UpperCamelCase_ = do_resize UpperCamelCase_ = size UpperCamelCase_ = resample UpperCamelCase_ = do_center_crop UpperCamelCase_ = crop_size UpperCamelCase_ = do_rescale UpperCamelCase_ = rescale_factor UpperCamelCase_ = do_normalize UpperCamelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCamelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD UpperCamelCase_ = do_convert_rgb def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) UpperCamelCase_ = get_resize_output_image_size(_UpperCAmelCase , size=size['shortest_edge'] , default_to_square=_UpperCAmelCase ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: UpperCamelCase_ = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> str: return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> PIL.Image.Image: UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize UpperCamelCase_ = size if size is not None else self.size UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='size' , default_to_square=_UpperCAmelCase ) UpperCamelCase_ = resample if resample is not None else self.resample UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' , default_to_square=_UpperCAmelCase ) UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean UpperCamelCase_ = image_std if image_std is not None else self.image_std UpperCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCamelCase_ = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCamelCase_ = [convert_to_rgb(_UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
UpperCamelCase_ = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: UpperCamelCase_ = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_center_crop: UpperCamelCase_ = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images] if do_rescale: UpperCamelCase_ = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: UpperCamelCase_ = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] UpperCamelCase_ = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] UpperCamelCase_ = {'pixel_values': images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ : Dict = 1_6 snake_case__ : List[str] = 3_2 def _snake_case (__lowercase , __lowercase = 16): UpperCamelCase_ = AutoTokenizer.from_pretrained('bert-base-cased') UpperCamelCase_ = load_dataset('glue' , 'mrpc') def tokenize_function(__lowercase): # max_length=None => use the model max length (it's actually the default) UpperCamelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__lowercase , max_length=__lowercase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase_ = datasets.map( __lowercase , batched=__lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase_ = tokenized_datasets.rename_column('label' , 'labels') def collate_fn(__lowercase): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase_ = 16 elif accelerator.mixed_precision != "no": UpperCamelCase_ = 8 else: UpperCamelCase_ = None return tokenizer.pad( __lowercase , padding='longest' , max_length=__lowercase , pad_to_multiple_of=__lowercase , return_tensors='pt' , ) # Instantiate dataloaders. 
UpperCamelCase_ = DataLoader( tokenized_datasets['train'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase) UpperCamelCase_ = DataLoader( tokenized_datasets['validation'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case__ : List[str] = mocked_dataloaders # noqa: F811 def _snake_case (__lowercase , __lowercase): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , __lowercase) == "1": UpperCamelCase_ = 2 # New Code # UpperCamelCase_ = int(args.gradient_accumulation_steps) # Initialize accelerator UpperCamelCase_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowercase) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( 'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`') # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase_ = config['lr'] UpperCamelCase_ = int(config['num_epochs']) UpperCamelCase_ = int(config['seed']) UpperCamelCase_ = int(config['batch_size']) UpperCamelCase_ = evaluate.load('glue' , 'mrpc') set_seed(__lowercase) UpperCamelCase_ , UpperCamelCase_ = get_dataloaders(__lowercase , __lowercase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__lowercase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase_ = model.to(accelerator.device) # Instantiate optimizer UpperCamelCase_ = AdamW(params=model.parameters() , lr=__lowercase) # Instantiate scheduler UpperCamelCase_ = get_linear_schedule_with_warmup( optimizer=__lowercase , num_warmup_steps=100 , num_training_steps=(len(__lowercase) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) # Now we train the model for epoch in range(__lowercase): model.train() for step, batch in enumerate(__lowercase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__lowercase): UpperCamelCase_ = model(**__lowercase) UpperCamelCase_ = output.loss accelerator.backward(__lowercase) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowercase): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): UpperCamelCase_ = model(**__lowercase) UpperCamelCase_ = outputs.logits.argmax(dim=-1) UpperCamelCase_ , UpperCamelCase_ = accelerator.gather_for_metrics((predictions, batch['labels'])) metric.add_batch( predictions=__lowercase , references=__lowercase , ) UpperCamelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __lowercase) def _snake_case (): UpperCamelCase_ = argparse.ArgumentParser(description='Simple example of training script.') parser.add_argument( '--mixed_precision' , type=__lowercase , default=__lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=__lowercase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.') UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(__lowercase , __lowercase) if __name__ == "__main__": main()
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    # Apply Ohm's law: given any two of voltage, current and resistance
    # (with the unknown one passed as 0), compute the third.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
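# Illustrative usage (not part of the original module): pass the unknown
# quantity as 0 and read the solved value from the returned dict.
print(ohms_law(voltage=10, current=0, resistance=5))   # {'current': 2.0}
print(ohms_law(voltage=0, current=1.5, resistance=2))  # {'voltage': 3.0}
print(ohms_law(voltage=12, current=4, resistance=0))   # {'resistance': 3.0}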
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _a : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = is_training UpperCamelCase_ = use_auxiliary_loss UpperCamelCase_ = num_queries UpperCamelCase_ = num_channels UpperCamelCase_ = min_size UpperCamelCase_ = max_size UpperCamelCase_ = num_labels UpperCamelCase_ = hidden_dim UpperCamelCase_ = hidden_dim def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _UpperCAmelCase ) UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase ) UpperCamelCase_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5 ).float() UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long() UpperCamelCase_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = MaskaFormerConfig( hidden_size=self.hidden_dim , ) UpperCamelCase_ = self.num_queries UpperCamelCase_ = self.num_labels UpperCamelCase_ = [1, 1, 1, 1] UpperCamelCase_ = self.num_channels UpperCamelCase_ = 64 UpperCamelCase_ = 128 UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim return config def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = output.encoder_hidden_states UpperCamelCase_ = output.pixel_decoder_hidden_states UpperCamelCase_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any: with torch.no_grad(): UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) 
self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() def comm_check_on_output(_UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) UpperCamelCase_ = model( pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () A_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t 
work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> int: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = (self.model_tester.min_size,) * 2 UpperCamelCase_ = { 'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ), 'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ), 'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(), } UpperCamelCase_ = self.model_tester.get_config() UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _UpperCAmelCase ( self ) -> List[Any]: if not self.model_tester.is_training: return UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss loss.backward() def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = True UpperCamelCase_ = True UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) UpperCamelCase_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) snake_case__ : List[Any] = 1E-4 def _snake_case (): UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_vision @slow class _a ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _UpperCAmelCase ( self ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ) UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) UpperCamelCase_ = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) # masks_queries_logits UpperCamelCase_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) UpperCamelCase_ = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) # class_queries_logits UpperCamelCase_ = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) UpperCamelCase_ = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], 
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase ) UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']] UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']] with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _snake_case (__lowercase): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _a ( nn.Module ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: super().__init__() UpperCamelCase_ = module UpperCamelCase_ = nn.Sequential( nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , ) UpperCamelCase_ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCAmelCase ( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _a ( unittest.TestCase ): """simple docstring""" A_ = """bigscience/bloom-1b7""" # Constant values A_ = 2.109_659_552_692_574 A_ = """Hello my name is""" A_ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) A_ = 10 def _UpperCAmelCase ( self ) -> List[Any]: # Models and tokenizer UpperCamelCase_ = AutoTokenizer.from_pretrained(self.model_name ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> List[Any]: super().setUp() # Models and tokenizer UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def _UpperCAmelCase ( self ) -> Dict: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ = self.model_abit.config self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) ) UpperCamelCase_ = config.to_dict() UpperCamelCase_ = config.to_diff_dict() UpperCamelCase_ = config.to_json_string() def _UpperCAmelCase ( self ) -> int: from bitsandbytes.nn import Paramsabit UpperCamelCase_ = self.model_fpaa.get_memory_footprint() UpperCamelCase_ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) UpperCamelCase_ = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCAmelCase ( self ) -> Any: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + 
TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) UpperCamelCase_ = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ = BitsAndBytesConfig() UpperCamelCase_ = True UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) UpperCamelCase_ = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def _UpperCAmelCase ( self ) -> int: with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = BitsAndBytesConfig() with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def _UpperCAmelCase ( self ) -> Optional[Any]: with self.assertRaises(_UpperCAmelCase ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) UpperCamelCase_ = self.model_fpaa.to(torch.floataa ) UpperCamelCase_ = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error UpperCamelCase_ = self.model_fpaa.to('cpu' ) # Check this does not throw an error UpperCamelCase_ = self.model_fpaa.half() # Check this does not throw an error UpperCamelCase_ = self.model_fpaa.float() def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _a ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCAmelCase ( cls ) -> Tuple: UpperCamelCase_ = 't5-small' UpperCamelCase_ = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense UpperCamelCase_ = AutoTokenizer.from_pretrained(cls.model_name ) UpperCamelCase_ = 'Translate in German: Hello, my dog is cute' def _UpperCAmelCase ( self ) -> List[Any]: gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Any: from transformers import TaForConditionalGeneration UpperCamelCase_ = TaForConditionalGeneration._keep_in_fpaa_modules UpperCamelCase_ = None # test with `t5-small` UpperCamelCase_ = 
TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` UpperCamelCase_ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) UpperCamelCase_ = modules def _UpperCAmelCase ( self ) -> str: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` UpperCamelCase_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` UpperCamelCase_ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) UpperCamelCase_ = model.generate(**_UpperCAmelCase ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> Union[str, Any]: super().setUp() # model_name UpperCamelCase_ = 'bigscience/bloom-560m' UpperCamelCase_ = 't5-small' # Different types of model UpperCamelCase_ = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Sequence classification model UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # CausalLM model UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Seq2seq model UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Union[str, Any]: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> Any: super().setUp() def _UpperCAmelCase ( self ) -> Optional[Any]: del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Any: UpperCamelCase_ = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass UpperCamelCase_ = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class _a ( UpperCAmelCase__ ): """simple 
docstring""" def _UpperCAmelCase ( self ) -> Dict: super().setUp() def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model UpperCamelCase_ = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch UpperCamelCase_ = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = 'facebook/opt-350m' super().setUp() def _UpperCAmelCase ( self ) -> Optional[Any]: if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters UpperCamelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): UpperCamelCase_ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability UpperCamelCase_ = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_UpperCAmelCase ) ): UpperCamelCase_ = LoRALayer(module.q_proj , rank=16 ) UpperCamelCase_ = LoRALayer(module.k_proj , rank=16 ) UpperCamelCase_ = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch UpperCamelCase_ = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): UpperCamelCase_ = model.forward(**_UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = """gpt2-xl""" A_ = 3.3_191_854_854_152_187
23
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
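# Hedged usage sketch, not part of the file above: composing a
# VisionEncoderDecoderConfig from two sub-configs via the classmethod the file
# defines. The ViT + BERT pairing is an illustrative assumption.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_cfg = ViTConfig()
decoder_cfg = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
assert config.decoder.is_decoder and config.decoder.add_cross_attention
print(config.model_type)  # "vision-encoder-decoder"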
23
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
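# Hedged sketch of the behavior the wrapper above implements: the learning-rate
# scheduler only advances when the optimizer actually stepped. Plain PyTorch
# stand-ins below; the accelerate `GradientState` is replaced by a boolean flag.
import torch

model = torch.nn.Linear(4, 4)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1, gamma=0.5)

for sync_gradients in (False, True):  # mimics two gradient-accumulation micro-steps
    opt.step()
    if sync_gradients:                # the wrapper's `step` gates on this condition
        sched.step()
print(sched.get_last_lr())            # lr halved exactly once: [0.05]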
23
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
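# Hedged usage note for the conversion script above: it is driven from the
# command line. The script filename and all paths below are placeholders,
# not verified file locations.
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path     /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert_config.json \
#       --pytorch_dump_path      /path/to/pytorch_model.bin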
23
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
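# Hedged sketch, not part of the file above: instantiating the config with its
# defaults and overriding one stage-wise hyperparameter.
from transformers import CvtConfig

config = CvtConfig(depth=[1, 2, 10])  # three stages, as in CvT-13
assert config.model_type == "cvt"
print(config.embed_dim)               # [64, 192, 384]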
23
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _a ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_MASKED_LM_MAPPING A_ = TF_MODEL_FOR_MASKED_LM_MAPPING def _UpperCAmelCase ( self ) -> List[str]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 
2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() UpperCamelCase_ = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(_UpperCAmelCase ) @slow @require_tf def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.2_5_1, 'token': 2201, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.2_1_4, 'token': 12790, 'token_str': ' Lyon', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) @require_tf def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = fill_masker.tokenizer UpperCamelCase_ = fill_masker.model UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) 
self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , ) with self.assertRaises(_UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_UpperCAmelCase ): fill_masker('This is' ) self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase ) 
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = sorted(vocab.keys() )[:2] # Pipeline argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Call argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Score equivalence UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs] UpperCamelCase_ = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # top_k=2, ntargets=3 UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.get_vocab() # String duplicates + id duplicates UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]] UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_UpperCAmelCase ) , 3 ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , )
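# Hedged sketch of the API exercised by the tests above: the fill-mask
# pipeline returns `top_k` candidate tokens per mask. The tiny checkpoint
# name is taken from the tests themselves.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for candidate in unmasker("My name is <mask>"):
    print(candidate["token_str"], candidate["score"])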
23
1
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
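# Hedged illustration (standalone NumPy, not the module's own code) of the
# cosine-similarity scoring performed above: normalize both embedding sets,
# take a dot product, and flag any image whose score exceeds a per-concept
# threshold. All sizes and the 0.5 threshold are illustrative assumptions.
import numpy as np

def cosine_matrix(a, b, eps=1e-12):
    a = a / np.clip(np.linalg.norm(a, axis=1, keepdims=True), eps, None)
    b = b / np.clip(np.linalg.norm(b, axis=1, keepdims=True), eps, None)
    return a @ b.T

rng = np.random.default_rng(0)
image_embeds = rng.normal(size=(2, 8))       # two images, 8-dim embeddings
concept_embeds = rng.normal(size=(17, 8))    # 17 concepts, as in the module
thresholds = np.full(17, 0.5)                # stand-in for the learned weights
scores = cosine_matrix(image_embeds, concept_embeds) - thresholds[None, :]
print(np.any(scores > 0, axis=1))            # one NSFW flag per image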
23
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = StableDiffusionSAGPipeline A_ = TEXT_TO_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_BATCH_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) UpperCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) UpperCamelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) torch.manual_seed(0 ) UpperCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase ) UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCamelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> List[Any]: if str(_UpperCAmelCase ).startswith('mps' ): UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase ) else: UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) UpperCamelCase_ = { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def _UpperCAmelCase ( self ) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' 
UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase_ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase_ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , width=768 , height=512 , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , ) UpperCamelCase_ = output.images assert image.shape == (1, 512, 768, 3)
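# Hedged sketch of the pipeline under test: self-attention guidance is enabled
# through `sag_scale`. Running this for real requires the checkpoint download
# and a CUDA GPU; treat it as illustrative, with the prompt as an assumption.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut", sag_scale=1.0, guidance_scale=7.5).images[0]
image.save("astronaut.png")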
23
1
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin snake_case__ : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""") snake_case__ : Dict = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") snake_case__ : Tuple = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class _a ( UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = CamembertTokenizer A_ = CamembertTokenizerFast A_ = True A_ = True def _UpperCAmelCase ( self ) -> Dict: super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase_ = CamembertTokenizer(_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = '<pad>' UpperCamelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>NOTUSED' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(_UpperCAmelCase ) , 1004 ) def _UpperCAmelCase ( self ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = CamembertTokenizer(_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) UpperCamelCase_ = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) UpperCamelCase_ = 'I was born in 92000, and this is falsé.' UpperCamelCase_ = tokenizer.encode(_UpperCAmelCase ) UpperCamelCase_ = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) UpperCamelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) UpperCamelCase_ = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: if not self.test_rust_tokenizer: return UpperCamelCase_ = self.get_tokenizer() UpperCamelCase_ = self.get_rust_tokenizer() UpperCamelCase_ = 'I was born in 92000, and this is falsé.' 
UpperCamelCase_ = tokenizer.tokenize(_UpperCAmelCase ) UpperCamelCase_ = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = self.get_rust_tokenizer() UpperCamelCase_ = tokenizer.encode(_UpperCAmelCase ) UpperCamelCase_ = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Optional[int]: # fmt: off UpperCamelCase_ = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. UpperCamelCase_ = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=_UpperCAmelCase , )
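# Hedged sketch of the slow/fast parity the tests above check: both tokenizer
# classes should agree on the same sentence. This uses the public
# camembert-base checkpoint rather than the local SentencePiece test fixture.
from transformers import CamembertTokenizer, CamembertTokenizerFast

slow = CamembertTokenizer.from_pretrained("camembert-base")
fast = CamembertTokenizerFast.from_pretrained("camembert-base")
text = "I was born in 92000, and this is falsé."
assert slow.encode(text) == fast.encode(text)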
23
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
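# Hedged usage sketch, not part of the original file: a small 4-node graph
# whose minimum spanning tree is recovered via the returned parent map.
if __name__ == "__main__":
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "d", 5)
    g.add_edge("a", "c", 15)
    dist, parent = prims_algo(g)
    print(parent)  # e.g. {'a': None, 'b': 'a', 'c': 'b', 'd': 'c'}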
23
1
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
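For reference, the series the two functions above evaluate, after reducing theta into [0, 2*pi) via the floor-division step:

\[
\sin\theta = \sum_{r=0}^{\infty} \frac{(-1)^r\,\theta^{2r+1}}{(2r+1)!},
\qquad
\cos\theta = \sum_{r=0}^{\infty} \frac{(-1)^r\,\theta^{2r}}{(2r)!}
\]

The implementations truncate each sum at r = accuracy - 1.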
23
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page-replacement cache that evicts the least recently used key first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        # Create an empty store and key set; a falsy `n` means "unbounded"
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Touch key `x`: evict the least recently used key if the store is full,
        # then move (or insert) `x` at the most recently used end
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        # Print the cached keys, most recently used first
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
23
1
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _a ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_MASKED_LM_MAPPING A_ = TF_MODEL_FOR_MASKED_LM_MAPPING def _UpperCAmelCase ( self ) -> List[str]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 
2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() UpperCamelCase_ = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(_UpperCAmelCase ) @slow @require_tf def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.2_5_1, 'token': 2201, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.2_1_4, 'token': 12790, 'token_str': ' Lyon', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) @require_tf def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = fill_masker.tokenizer UpperCamelCase_ = fill_masker.model UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) 
self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , ) with self.assertRaises(_UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_UpperCAmelCase ): fill_masker('This is' ) self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase ) 
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = sorted(vocab.keys() )[:2] # Pipeline argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Call argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Score equivalence UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs] UpperCamelCase_ = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # top_k=2, ntargets=3 UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.get_vocab() # String duplicates + id duplicates UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]] UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_UpperCAmelCase ) , 3 ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , )
23
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Apply the sigmoid linear unit (SiLU / swish), x * sigmoid(x), element-wise."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
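    # Small illustrative demo appended for this edit; the sample vector is arbitrary.
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))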
23
1
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return the PyTorch activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
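

# A minimal usage sketch appended for illustration (not part of the original
# module); it only uses the names defined above and requires torch.
if __name__ == "__main__":
    for name in ("silu", "mish", "gelu"):
        print(name, "->", get_activation(name))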
23
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year with Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
23
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
23
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
23
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
23
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find the first place where a non-binary file is opened without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find the first real print statement (ignoring prints inside comments and strings)."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        filtered_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return filtered_matches[0] if filtered_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
23
1
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND gate: the output is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
23
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _snake_case (__lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ): set_seed(3) # generate train_data and objective_set UpperCamelCase_ , UpperCamelCase_ = generate_datasets( __lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase) # keeps model same across runs set_seed(4) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # load pretrained model UpperCamelCase_ = load_gpta('gpt2').to(__lowercase) print('computing perplexity on objective set') UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item() print('perplexity on objective set:' , __lowercase) # collect igf pairs and save to file demo.jbl collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _snake_case (__lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ): set_seed(42) # Load pre-trained model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') # Initialize secondary learner to use embedding weights of model UpperCamelCase_ = SecondaryLearner(__lowercase) # Train secondary learner UpperCamelCase_ = train_secondary_learner( __lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ): UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') UpperCamelCase_ = RandomSampler(__lowercase) UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase) UpperCamelCase_ = max_steps // (len(__lowercase)) + 1 UpperCamelCase_ = 0 UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase) model.train() if secondary_learner is not None: secondary_learner.to(__lowercase) secondary_learner.eval() UpperCamelCase_ = [] UpperCamelCase_ = 0 UpperCamelCase_ = [] UpperCamelCase_ = [] # Compute the performance of the transformer model at the beginning UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) for epoch in range(int(__lowercase)): for step, example in enumerate(__lowercase): torch.cuda.empty_cache() UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1) UpperCamelCase_ = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
UpperCamelCase_ = model(__lowercase , labels=__lowercase) UpperCamelCase_ = True if secondary_learner is not None: UpperCamelCase_ = secondary_learner.forward( torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item() observed_qs.append(float(__lowercase)) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase_ = -1 if predicted_q < threshold: UpperCamelCase_ = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu())) UpperCamelCase_ = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase_ = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __lowercase) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _snake_case (): UpperCamelCase_ = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task') # Required parameters parser.add_argument( '--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , ) parser.add_argument( '--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--data_file' , type=__lowercase , default=__lowercase , help=( 'A jbl file containing tokenized data which can be split as objective dataset, ' 'train_dataset and test_dataset.' ) , ) parser.add_argument( '--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , ) parser.add_argument( '--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , ) parser.add_argument( '--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.') parser.add_argument( '--context_len' , default=32 , type=__lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) , ) parser.add_argument( '--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , ) parser.add_argument( '--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq') parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs') parser.add_argument( '--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , ) parser.add_argument( '--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ') parser.add_argument( '--eval_interval' , default=10 , type=__lowercase , help=( 'decay the selectivity of our secondary learner filter from' '1 standard deviation above average to 1 below average after 10 batches' ) , ) parser.add_argument( '--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data') parser.add_argument( '--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set') parser.add_argument( '--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner') parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length') parser.add_argument( '--threshold' , default=1.0 , type=__lowercase , help=( 'The threshold value used by secondary learner to filter the train_data and allow only' ' informative data as input to the model' ) , ) parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name') parser.add_argument( '--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , ) # Load train data for secondary learner UpperCamelCase_ = joblib.load('data/IGF_values.jbl') # Train secondary learner UpperCamelCase_ = training_secondary_learner( __lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , ) # load pretrained gpt2 model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') set_seed(42) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase_ , UpperCamelCase_ = generate_datasets( context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , ) if __name__ == "__main__": main()
23
1
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _a ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _UpperCAmelCase ( self ) -> int: torch.manual_seed(0 ) UpperCamelCase_ = UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return model @property def _UpperCAmelCase ( self ) -> str: torch.manual_seed(0 ) UpperCamelCase_ = UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=10 , ) return model @property def _UpperCAmelCase ( self ) -> Dict: torch.manual_seed(0 ) UpperCamelCase_ = AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , ) UpperCamelCase_ = UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return vqvae, unet @slow def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase_ = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) UpperCamelCase_ = DDPMScheduler() UpperCamelCase_ = AudioDiffusionPipeline(vqvae=_UpperCAmelCase , unet=self.dummy_unet , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase ) UpperCamelCase_ = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) UpperCamelCase_ = pipe(generator=_UpperCAmelCase , steps=4 ) UpperCamelCase_ = output.audios[0] UpperCamelCase_ = output.images[0] UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) UpperCamelCase_ = pipe(generator=_UpperCAmelCase , steps=4 , return_dict=_UpperCAmelCase ) UpperCamelCase_ = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) UpperCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] UpperCamelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:10] UpperCamelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 UpperCamelCase_ = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) UpperCamelCase_ = 
DDIMScheduler() UpperCamelCase_ = self.dummy_vqvae_and_unet UpperCamelCase_ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase ) UpperCamelCase_ = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) np.random.seed(0 ) UpperCamelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) UpperCamelCase_ = pipe(raw_audio=_UpperCAmelCase , generator=_UpperCAmelCase , start_step=5 , steps=10 ) UpperCamelCase_ = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) UpperCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] UpperCamelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 UpperCamelCase_ = self.dummy_unet_condition UpperCamelCase_ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=_UpperCAmelCase , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase ) UpperCamelCase_ = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) np.random.seed(0 ) UpperCamelCase_ = torch.rand((1, 1, 10) ) UpperCamelCase_ = pipe(generator=_UpperCAmelCase , encoding=_UpperCAmelCase ) UpperCamelCase_ = output.images[0] UpperCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] UpperCamelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = torch_device UpperCamelCase_ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) UpperCamelCase_ = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) UpperCamelCase_ = pipe(generator=_UpperCAmelCase ) UpperCamelCase_ = output.audios[0] UpperCamelCase_ = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] UpperCamelCase_ = np.frombuffer(image.tobytes() , dtype='uint8' )[:10] UpperCamelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
23
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _a : """simple docstring""" A_ = MBartConfig A_ = {} A_ = """gelu""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Union[str, Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = eos_token_id UpperCamelCase_ = pad_token_id UpperCamelCase_ = bos_token_id def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase_ = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = TFMBartModel(config=_UpperCAmelCase ).get_decoder() UpperCamelCase_ = inputs_dict['input_ids'] UpperCamelCase_ = input_ids[:1, :] UpperCamelCase_ = inputs_dict['attention_mask'][:1, :] UpperCamelCase_ = inputs_dict['head_mask'] UpperCamelCase_ = 1 # first forward pass UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple() UpperCamelCase_ = past_key_values[1] def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ): if attention_mask is None: UpperCamelCase_ = 
tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id) , tf.inta) if decoder_attention_mask is None: UpperCamelCase_ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta), ] , axis=-1 , ) if head_mask is None: UpperCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () A_ = (TFMBartForConditionalGeneration,) if is_tf_available() else () A_ = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) A_ = True A_ = False A_ = False def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = TFMBartModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _a ( unittest.TestCase ): """simple docstring""" A_ = [ """ UN Chief Says There Is No Military Solution in Syria""", ] A_ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] A_ = """facebook/mbart-large-en-ro""" @cached_property def _UpperCAmelCase ( self ) -> Any: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> int: UpperCamelCase_ = self.translate_src_text(**_UpperCAmelCase ) self.assertListEqual(self.expected_text , _UpperCAmelCase ) def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> List[str]: UpperCamelCase_ = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' ) UpperCamelCase_ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) UpperCamelCase_ = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def _UpperCAmelCase ( self ) -> List[Any]: self._assert_generated_batch_equal_expected()
23
1
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
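

# A hypothetical usage sketch appended for illustration (not part of the
# original module). It assumes the public `diffusers` package layout and the
# "harmonai/maestro-150k" checkpoint, neither of which is guaranteed here:
#
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)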
23
def factorial(num: int) -> int:
    """Find the factorial of the given number."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of the number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
23
1
import string


def decrypt(message: str) -> None:
    """Brute-force every possible Caesar key and print each candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case__ : str = logging.get_logger(__name__) def _snake_case (__lowercase): if isinstance(__lowercase , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]): return videos elif isinstance(__lowercase , (list, tuple)) and is_valid_image(videos[0]): return [videos] elif is_valid_image(__lowercase): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""") class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = ["""pixel_values"""] def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None: super().__init__(**_UpperCAmelCase ) UpperCamelCase_ = size if size is not None else {'shortest_edge': 224} UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' ) UpperCamelCase_ = do_resize UpperCamelCase_ = size UpperCamelCase_ = do_center_crop UpperCamelCase_ = crop_size UpperCamelCase_ = resample UpperCamelCase_ = do_rescale UpperCamelCase_ = rescale_factor UpperCamelCase_ = do_normalize UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" in size: UpperCamelCase_ = get_resize_output_image_size(_UpperCAmelCase , size['shortest_edge'] , default_to_square=_UpperCAmelCase ) elif "height" in size and "width" in size: UpperCamelCase_ = (size['height'], size['width']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: UpperCamelCase_ = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> int: return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. UpperCamelCase_ = to_numpy_array(_UpperCAmelCase ) if do_resize: UpperCamelCase_ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) if do_center_crop: UpperCamelCase_ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase ) if do_rescale: UpperCamelCase_ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) if do_normalize: UpperCamelCase_ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) UpperCamelCase_ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) return image def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> PIL.Image.Image: UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize UpperCamelCase_ = resample if resample is not None else self.resample UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean UpperCamelCase_ = image_std if image_std is not None else self.image_std UpperCamelCase_ = size if size is not None else self.size UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' ) if not valid_images(_UpperCAmelCase ): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) UpperCamelCase_ = make_batched(_UpperCAmelCase ) UpperCamelCase_ = [ [ self._preprocess_image( image=_UpperCAmelCase , do_resize=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , do_center_crop=_UpperCAmelCase , crop_size=_UpperCAmelCase , do_rescale=_UpperCAmelCase , rescale_factor=_UpperCAmelCase , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , data_format=_UpperCAmelCase , ) for img in video ] for video in videos ] UpperCamelCase_ = {'pixel_values': videos} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
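# Hedged usage sketch: the processor above matches transformers'
# VideoMAEImageProcessor, so the public class is used here for illustration;
# the 8-frame random clip is a stand-in for real video frames.
import numpy as np
from transformers import VideoMAEImageProcessor  # assumed public equivalent of the class above

video = [np.random.randint(0, 256, size=(360, 640, 3), dtype=np.uint8) for _ in range(8)]
processor = VideoMAEImageProcessor()
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224): batch, frames, channels, height, width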
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
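# With the lazy import structure above in place, downstream code can import the
# BioGPT symbols directly from the package root, e.g. (checkpoint name shown
# for illustration; requires a PyTorch install):
#
#   from transformers import BioGptForCausalLM, BioGptTokenizer
#
#   tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")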
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 A_ = 42 class _a ( UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" A_ = 1 @register_to_config def __init__( self , _UpperCAmelCase = 2000 , _UpperCAmelCase = 0.1_5 , _UpperCAmelCase = 0.0_1 , _UpperCAmelCase = 1_3_4_8.0 , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = 1 , ) -> Tuple: # standard deviation of the initial noise distribution UpperCamelCase_ = sigma_max # setable values UpperCamelCase_ = None self.set_sigmas(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> torch.FloatTensor: return sample def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> str: UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps UpperCamelCase_ = torch.linspace(1 , _UpperCAmelCase , _UpperCAmelCase , device=_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> Any: UpperCamelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min UpperCamelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) UpperCamelCase_ = torch.exp(torch.linspace(math.log(_UpperCAmelCase ) , math.log(_UpperCAmelCase ) , _UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SdeVeOutput, Tuple]: if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) UpperCamelCase_ = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) UpperCamelCase_ = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda UpperCamelCase_ = timesteps.to(self.discrete_sigmas.device ) UpperCamelCase_ = self.discrete_sigmas[timesteps].to(sample.device ) UpperCamelCase_ = self.get_adjacent_sigma(_UpperCAmelCase , _UpperCAmelCase ).to(sample.device ) UpperCamelCase_ = torch.zeros_like(_UpperCAmelCase ) UpperCamelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods UpperCamelCase_ = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): UpperCamelCase_ = 
diffusion.unsqueeze(-1 ) UpperCamelCase_ = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of UpperCamelCase_ = randn_tensor( sample.shape , layout=sample.layout , generator=_UpperCAmelCase , device=sample.device , dtype=sample.dtype ) UpperCamelCase_ = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? UpperCamelCase_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=_UpperCAmelCase , prev_sample_mean=_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]: if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction UpperCamelCase_ = randn_tensor(sample.shape , layout=sample.layout , generator=_UpperCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr UpperCamelCase_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() UpperCamelCase_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() UpperCamelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 UpperCamelCase_ = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term UpperCamelCase_ = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): UpperCamelCase_ = step_size.unsqueeze(-1 ) UpperCamelCase_ = sample + step_size * model_output UpperCamelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCamelCase_ = timesteps.to(original_samples.device ) UpperCamelCase_ = self.discrete_sigmas.to(original_samples.device )[timesteps] UpperCamelCase_ = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(_UpperCAmelCase ) * sigmas[:, None, None, None] ) UpperCamelCase_ = noise + original_samples return noisy_samples def __len__( self ) -> Optional[int]: return self.config.num_train_timesteps
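# Minimal predictor-corrector sampling sketch for the variance-exploding SDE
# scheduler above. The method names (set_timesteps/set_sigmas/step_correct/
# step_pred) and config fields follow diffusers' ScoreSdeVeScheduler, which
# this file mirrors; `score_model` is a hypothetical network whose output has
# a `.sample` attribute holding the score estimate.
import torch


def sde_ve_sample(score_model, scheduler, shape=(1, 3, 32, 32), num_inference_steps=1000):
    sample = torch.randn(shape) * scheduler.config.sigma_max
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
        # corrector: Langevin-style updates at a fixed noise level
        for _ in range(scheduler.config.correct_steps):
            model_output = score_model(sample, sigma_t).sample
            sample = scheduler.step_correct(model_output, sample).prev_sample
        # predictor: one reverse-SDE step
        model_output = score_model(sample, sigma_t).sample
        output = scheduler.step_pred(model_output, t, sample)
        sample, sample_mean = output.prev_sample, output.prev_sample_mean
    return sample_mean  # the noise-free mean of the final step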
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    # n may not already appear in the row, the column, or the 3x3 box of (row, column)
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # backtrack
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
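# Note: sudoku() fills the grid in place, so solving mutates its argument. To
# keep the original grid for comparison, solve a deep copy instead:
import copy

grid_copy = copy.deepcopy(initial_grid)
solved = sudoku(grid_copy)  # initial_grid itself is left untouched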
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa snake_case__ : str = logging.getLogger(__name__) class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = """summarization""" A_ = ["""loss"""] A_ = ROUGE_KEYS A_ = """rouge2""" def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]: if hparams.sortish_sampler and hparams.gpus > 1: UpperCamelCase_ = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(_UpperCAmelCase , num_labels=_UpperCAmelCase , mode=self.mode , **_UpperCAmelCase ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) UpperCamelCase_ = Path(self.output_dir ) / 'metrics.json' UpperCamelCase_ = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) UpperCamelCase_ = 0 UpperCamelCase_ = defaultdict(_UpperCAmelCase ) UpperCamelCase_ = self.config.model_type UpperCamelCase_ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size UpperCamelCase_ = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } UpperCamelCase_ = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } UpperCamelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} UpperCamelCase_ = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}""" assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}""" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) UpperCamelCase_ = get_git_info()['repo_sha'] UpperCamelCase_ = hparams.num_workers UpperCamelCase_ = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _UpperCAmelCase ): UpperCamelCase_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang] UpperCamelCase_ = self.decoder_start_token_id UpperCamelCase_ = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset ) UpperCamelCase_ = False 
UpperCamelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: UpperCamelCase_ = self.hparams.eval_max_gen_length else: UpperCamelCase_ = self.model.config.max_length UpperCamelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict[str, List[str]]: UpperCamelCase_ = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(_UpperCAmelCase , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) UpperCamelCase_ = True return readable_batch def _UpperCAmelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]: return self.model(_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> int: UpperCamelCase_ = self.tokenizer.batch_decode( _UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) return lmap(str.strip , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = self.tokenizer.pad_token_id UpperCamelCase_ , UpperCamelCase_ = batch['input_ids'], batch['attention_mask'] UpperCamelCase_ = batch['labels'] if isinstance(self.model , _UpperCAmelCase ): UpperCamelCase_ = self.model._shift_right(_UpperCAmelCase ) else: UpperCamelCase_ = shift_tokens_right(_UpperCAmelCase , _UpperCAmelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero UpperCamelCase_ = decoder_input_ids self.save_readable_batch(_UpperCAmelCase ) UpperCamelCase_ = self(_UpperCAmelCase , attention_mask=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , use_cache=_UpperCAmelCase ) UpperCamelCase_ = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id UpperCamelCase_ = nn.CrossEntropyLoss(ignore_index=_UpperCAmelCase ) assert lm_logits.shape[-1] == self.vocab_size UpperCamelCase_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: UpperCamelCase_ = nn.functional.log_softmax(_UpperCAmelCase , dim=-1 ) UpperCamelCase_ , UpperCamelCase_ = label_smoothed_nll_loss( _UpperCAmelCase , _UpperCAmelCase , self.hparams.label_smoothing , ignore_index=_UpperCAmelCase ) return (loss,) @property def _UpperCAmelCase ( self ) -> int: return self.tokenizer.pad_token_id def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: UpperCamelCase_ = self._step(_UpperCAmelCase ) UpperCamelCase_ = dict(zip(self.loss_names , _UpperCAmelCase ) ) # tokens per batch UpperCamelCase_ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() UpperCamelCase_ = batch['input_ids'].shape[0] UpperCamelCase_ = batch['input_ids'].eq(self.pad ).sum() UpperCamelCase_ = batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: return self._generative_step(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase="val" ) -> Dict: self.step_count += 1 UpperCamelCase_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} UpperCamelCase_ = losses['loss'] UpperCamelCase_ = { k: np.array([x[k] for x in outputs] ).mean() for k 
in self.metric_names + ['gen_time', 'gen_len'] } UpperCamelCase_ = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).type_as(_UpperCAmelCase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(_UpperCAmelCase ) UpperCamelCase_ = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()} UpperCamelCase_ = self.step_count self.metrics[prefix].append(_UpperCAmelCase ) # callback writes this to self.metrics_save_path UpperCamelCase_ = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, f"""{prefix}_loss""": loss, f"""{prefix}_{self.val_metric}""": metric_tensor, } def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: return calculate_rouge(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> dict: UpperCamelCase_ = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') UpperCamelCase_ = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=_UpperCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) UpperCamelCase_ = (time.time() - ta) / batch['input_ids'].shape[0] UpperCamelCase_ = self.ids_to_clean_text(_UpperCAmelCase ) UpperCamelCase_ = self.ids_to_clean_text(batch['labels'] ) UpperCamelCase_ = self._step(_UpperCAmelCase ) UpperCamelCase_ = dict(zip(self.loss_names , _UpperCAmelCase ) ) UpperCamelCase_ = self.calc_generative_metrics(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = np.mean(lmap(_UpperCAmelCase , _UpperCAmelCase ) ) base_metrics.update(gen_time=_UpperCAmelCase , gen_len=_UpperCAmelCase , preds=_UpperCAmelCase , target=_UpperCAmelCase , **_UpperCAmelCase ) return base_metrics def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> str: return self._generative_step(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> int: return self.validation_epoch_end(_UpperCAmelCase , prefix='test' ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> SeqaSeqDataset: UpperCamelCase_ = self.n_obs[type_path] UpperCamelCase_ = self.target_lens[type_path] UpperCamelCase_ = self.dataset_class( self.tokenizer , type_path=_UpperCAmelCase , n_obs=_UpperCAmelCase , max_target_length=_UpperCAmelCase , **self.dataset_kwargs , ) return dataset def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False ) -> DataLoader: UpperCamelCase_ = self.get_dataset(_UpperCAmelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": UpperCamelCase_ = dataset.make_sortish_sampler(_UpperCAmelCase , distributed=self.hparams.gpus > 1 ) return DataLoader( _UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCAmelCase , num_workers=self.num_workers , sampler=_UpperCAmelCase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": UpperCamelCase_ = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( _UpperCAmelCase , batch_sampler=_UpperCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( _UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCAmelCase , num_workers=self.num_workers , 
sampler=_UpperCAmelCase , ) def _UpperCAmelCase ( self ) -> DataLoader: UpperCamelCase_ = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=_UpperCAmelCase ) return dataloader def _UpperCAmelCase ( self ) -> DataLoader: return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def _UpperCAmelCase ( self ) -> DataLoader: return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def _UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: BaseTransformer.add_model_specific_args(_UpperCAmelCase , _UpperCAmelCase ) add_generic_args(_UpperCAmelCase , _UpperCAmelCase ) parser.add_argument( '--max_source_length' , default=1024 , type=_UpperCAmelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=56 , type=_UpperCAmelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=142 , type=_UpperCAmelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=142 , type=_UpperCAmelCase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=_UpperCAmelCase ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=_UpperCAmelCase ) parser.add_argument('--max_tokens_per_batch' , type=_UpperCAmelCase , default=_UpperCAmelCase ) parser.add_argument('--logger_name' , type=_UpperCAmelCase , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=_UpperCAmelCase , default=500 , required=_UpperCAmelCase , help='# examples. -1 means use all.' ) parser.add_argument('--n_test' , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=_UpperCAmelCase , default='summarization' , required=_UpperCAmelCase , help='# examples. -1 means use all.' 
) parser.add_argument('--label_smoothing' , type=_UpperCAmelCase , default=0.0 , required=_UpperCAmelCase ) parser.add_argument('--src_lang' , type=_UpperCAmelCase , default='' , required=_UpperCAmelCase ) parser.add_argument('--tgt_lang' , type=_UpperCAmelCase , default='' , required=_UpperCAmelCase ) parser.add_argument('--eval_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase ) parser.add_argument( '--val_metric' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=_UpperCAmelCase , default=1 , required=_UpperCAmelCase , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help=( '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' ) , ) return parser class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = """translation""" A_ = ["""loss"""] A_ = ["""bleu"""] A_ = """bleu""" def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: super().__init__(_UpperCAmelCase , **_UpperCAmelCase ) UpperCamelCase_ = hparams.src_lang UpperCamelCase_ = hparams.tgt_lang def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> dict: return calculate_bleu(_UpperCAmelCase , _UpperCAmelCase ) def _snake_case (__lowercase , __lowercase=None): Path(args.output_dir).mkdir(exist_ok=__lowercase) check_output_dir(__lowercase , expected_items=3) if model is None: if "summarization" in args.task: UpperCamelCase_ = SummarizationModule(__lowercase) else: UpperCamelCase_ = TranslationModule(__lowercase) UpperCamelCase_ = Path(args.data_dir).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir).startswith('/tmp') or str(args.output_dir).startswith('/var') ): UpperCamelCase_ = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger UpperCamelCase_ = os.environ.get('WANDB_PROJECT' , __lowercase) UpperCamelCase_ = WandbLogger(name=model.output_dir.name , project=__lowercase) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger UpperCamelCase_ = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""") if args.early_stopping_patience >= 0: UpperCamelCase_ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience) else: UpperCamelCase_ = False UpperCamelCase_ = args.val_metric == 'loss' UpperCamelCase_ = generic_train( __lowercase , __lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , __lowercase) , early_stopping_callback=__lowercase , logger=__lowercase , ) pickle_save(model.hparams , model.output_dir / 'hparams.pkl') if not args.do_predict: return model UpperCamelCase_ = '' UpperCamelCase_ = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt') , recursive=__lowercase)) if checkpoints: UpperCamelCase_ = checkpoints[-1] UpperCamelCase_ = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": snake_case__ : Union[str, Any] 
= argparse.ArgumentParser() snake_case__ : Dict = pl.Trainer.add_argparse_args(parser) snake_case__ : List[str] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) snake_case__ : Optional[Any] = parser.parse_args() main(args)
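# Hedged CLI sketch for the training entry point above. The data layout
# (train/val/test .source and .target files under --data_dir) follows the
# examples/seq2seq convention this script derives from, and the exact flag set
# depends on the generic and Lightning arguments registered elsewhere:
#
#   python finetune.py \
#     --model_name_or_path facebook/bart-large \
#     --data_dir ./cnn_dm \
#     --output_dir ./bart_cnn_out \
#     --do_train --do_predict --gpus 1 \
#     --max_source_length 1024 --max_target_length 56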
import datasets from .evaluate import evaluate snake_case__ : int = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ snake_case__ : Union[str, Any] = """ This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ snake_case__ : Any = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): """simple docstring""" def _UpperCAmelCase ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': { 'id': datasets.Value('string' ), 'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ), }, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: UpperCamelCase_ = {prediction['id']: prediction['prediction_text'] for prediction in predictions} UpperCamelCase_ 
= [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] UpperCamelCase_ = evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase ) return score
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
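# Quick checks, assuming the helpers above are in scope: the first three points
# lie on the line x = y = z, so the AB x AC cross product is the zero vector.
assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
assert not are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))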
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class _a ( datasets.BeamBasedBuilder ): """simple docstring""" def _UpperCAmelCase ( self ) -> List[str]: return datasets.DatasetInfo( features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=_UpperCAmelCase , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase ) class _a ( datasets.BeamBasedBuilder ): """simple docstring""" def _UpperCAmelCase ( self ) -> Any: return datasets.DatasetInfo( features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=_UpperCAmelCase , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} ) ] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase ) def _snake_case (): return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])] def _snake_case (): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])] class _a ( UpperCAmelCase__ ): """simple docstring""" @require_beam def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _UpperCAmelCase ( self ) -> List[str]: import apache_beam as beam UpperCamelCase_ = beam.io.parquetio.WriteToParquet UpperCamelCase_ = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock: UpperCamelCase_ = partial(_UpperCAmelCase , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertTrue( os.path.exists( os.path.join( _UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) 
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _UpperCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : Union[str, Any] = """platform""" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _a : """simple docstring""" A_ = PegasusConfig A_ = {} A_ = """gelu""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Optional[int]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = eos_token_id UpperCamelCase_ = pad_token_id UpperCamelCase_ = bos_token_id def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) UpperCamelCase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase_ = np.concatenate([input_ids, eos_tensor] , axis=1 ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase_ = prepare_pegasus_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any: UpperCamelCase_ = 20 UpperCamelCase_ = model_class_name(_UpperCAmelCase ) UpperCamelCase_ = model.encode(inputs_dict['input_ids'] ) UpperCamelCase_ , UpperCamelCase_ = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) UpperCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) UpperCamelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , 
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCamelCase_ = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) UpperCamelCase_ = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = model.decode(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = 20 UpperCamelCase_ = model_class_name(_UpperCAmelCase ) UpperCamelCase_ = model.encode(inputs_dict['input_ids'] ) UpperCamelCase_ , UpperCamelCase_ = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) UpperCamelCase_ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCamelCase_ = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCamelCase_ = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) UpperCamelCase_ = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) UpperCamelCase_ = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase ) UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ): if attention_mask is None: UpperCamelCase_ = np.not_equal(__lowercase , config.pad_token_id).astype(np.inta) if decoder_attention_mask is None: UpperCamelCase_ = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id).astype(np.inta), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _a ( UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) A_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () A_ = True A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = FlaxPegasusModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , 
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = model_class(_UpperCAmelCase ) @jax.jit def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ): return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) with self.subTest('JIT Enabled' ): UpperCamelCase_ = encode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): UpperCamelCase_ = encode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) UpperCamelCase_ = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return model.decode( decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , ) with self.subTest('JIT Enabled' ): UpperCamelCase_ = decode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): UpperCamelCase_ = decode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _UpperCAmelCase ( self ) -> int: for model_class_name in self.all_model_classes: UpperCamelCase_ = model_class_name.from_pretrained('google/pegasus-large' , from_pt=_UpperCAmelCase ) UpperCamelCase_ = np.ones((1, 1) ) UpperCamelCase_ = model(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) UpperCamelCase_ = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) UpperCamelCase_ = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] UpperCamelCase_ = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] UpperCamelCase_ = tokenizer(_UpperCAmelCase , return_tensors='np' , truncation=_UpperCAmelCase , max_length=512 , padding=_UpperCAmelCase ) UpperCamelCase_ = model.generate(**_UpperCAmelCase , num_beams=2 ).sequences UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) assert tgt_text == decoded
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
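# A direct-call sketch of the converter above. The paths are hypothetical
# placeholders; point them at a real TF checkpoint, its albert_config.json,
# and a writable output file.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="albert_base/model.ckpt-best",
    albert_config_file="albert_base/albert_config.json",
    pytorch_dump_path="albert_base/pytorch_model.bin",
)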
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
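# The `_LazyModule` pattern above defers heavy imports until a symbol is first
# touched. A minimal sketch of the same idea using only the standard library
# (PEP 562 module-level __getattr__); `my_pkg` and `heavy_module` are
# hypothetical names used only for illustration:
import importlib

# my_pkg/__init__.py
_import_structure = {"heavy_module": ["ExpensiveClass"]}


def __getattr__(name):
    # Resolve attributes on first access instead of at import time,
    # which is the effect `_LazyModule` achieves for `transformers`.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")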
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _a ( UpperCAmelCase__ ): """simple docstring""" @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' ) UpperCamelCase_ = bertabert.config.encoder.vocab_size UpperCamelCase_ = tokenizer.sep_token_id UpperCamelCase_ = tokenizer.cls_token_id UpperCamelCase_ = 128 UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) UpperCamelCase_ = train_dataset.select(range(32 ) ) UpperCamelCase_ = val_dataset.select(range(16 ) ) UpperCamelCase_ = 4 def _map_to_encoder_decoder_inputs(_UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 ) UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 ) UpperCamelCase_ = inputs.input_ids UpperCamelCase_ = inputs.attention_mask UpperCamelCase_ = outputs.input_ids UpperCamelCase_ = outputs.input_ids.copy() UpperCamelCase_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] UpperCamelCase_ = outputs.attention_mask assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(_UpperCAmelCase ): UpperCamelCase_ = pred.label_ids UpperCamelCase_ = pred.predictions # all unnecessary tokens are removed UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset UpperCamelCase_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset UpperCamelCase_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) UpperCamelCase_ = self.get_auto_remove_tmp_dir() UpperCamelCase_ = SeqaSeqTrainingArguments( output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer UpperCamelCase_ = SeqaSeqTrainer( model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , 
train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , ) # start training trainer.train()
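# A minimal sketch of the label-masking step used in the preprocessing above:
# positions holding the pad token must become -100 so PyTorch's cross-entropy
# loss ignores them. The pad id below is an assumed value; in practice use
# tokenizer.pad_token_id.
pad_token_id = 0

labels = [[7, 42, 9, 0, 0], [3, 5, 0, 0, 0]]
masked = [[-100 if token == pad_token_id else token for token in row] for row in labels]

assert masked == [[7, 42, 9, -100, -100], [3, 5, -100, -100, -100]]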
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
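# What `accelerator.accumulate(model)` automates, written out by hand: scale
# each micro-batch loss by the accumulation factor and only step the optimizer
# every `accum_steps` batches. The toy model and random data are assumptions
# for illustration only.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accum_steps = 4

data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]
for step, (x, y) in enumerate(data):
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_steps
    loss.backward()  # gradients add up across micro-batches
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()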
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Pure-Python implementation of the SHA-1 hash (for study, not production use)."""

    def __init__(self, data):
        self.data = data
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()

    import doctest

    doctest.testmod()
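# Quick sanity check of the pure-Python implementation against the C one in
# hashlib, using the classic SHA-1 test vector:
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()
print(SHA1Hash(message).final_hash())  # 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12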
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _a : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = is_training UpperCamelCase_ = use_auxiliary_loss UpperCamelCase_ = num_queries UpperCamelCase_ = num_channels UpperCamelCase_ = min_size UpperCamelCase_ = max_size UpperCamelCase_ = num_labels UpperCamelCase_ = hidden_dim UpperCamelCase_ = hidden_dim def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _UpperCAmelCase ) UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase ) UpperCamelCase_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5 ).float() UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long() UpperCamelCase_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = MaskaFormerConfig( hidden_size=self.hidden_dim , ) UpperCamelCase_ = self.num_queries UpperCamelCase_ = self.num_labels UpperCamelCase_ = [1, 1, 1, 1] UpperCamelCase_ = self.num_channels UpperCamelCase_ = 64 UpperCamelCase_ = 128 UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim return config def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = output.encoder_hidden_states UpperCamelCase_ = output.pixel_decoder_hidden_states UpperCamelCase_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any: with torch.no_grad(): UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) 
self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() def comm_check_on_output(_UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) UpperCamelCase_ = model( pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () A_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t 
work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> int: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = (self.model_tester.min_size,) * 2 UpperCamelCase_ = { 'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ), 'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ), 'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(), } UpperCamelCase_ = self.model_tester.get_config() UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _UpperCAmelCase ( self ) -> List[Any]: if not self.model_tester.is_training: return UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss loss.backward() def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = True UpperCamelCase_ = True UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) UpperCamelCase_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) snake_case__ : List[Any] = 1E-4 def _snake_case (): UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_vision @slow class _a ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _UpperCAmelCase ( self ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ) UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) UpperCamelCase_ = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) # masks_queries_logits UpperCamelCase_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) UpperCamelCase_ = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) # class_queries_logits UpperCamelCase_ = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) UpperCamelCase_ = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], 
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase ) UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']] UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']] with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
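# Minimal sketch of the `retain_grad` pattern the training test above uses:
# by default autograd frees gradients of non-leaf tensors, so intermediate
# activations must opt in before `backward()` if you want to inspect them.
import torch

x = torch.randn(2, 3, requires_grad=True)
hidden = torch.tanh(x @ torch.randn(3, 3))
hidden.retain_grad()  # keep .grad for this non-leaf tensor
loss = hidden.sum()
loss.backward()
assert hidden.grad is not None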
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
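# A minimal sketch of the same timeout behaviour using the standalone
# `filelock` package, whose API the class vendored by `datasets` follows
# (an assumption worth checking against the installed version):
from filelock import FileLock, Timeout

lock = FileLock("resource.lock")
try:
    with lock.acquire(timeout=0.01):
        ...  # critical section: only one process gets here at a time
except Timeout:
    print("another process holds resource.lock")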
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Composite configuration holding one encoder and one decoder sub-configuration."""

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
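# Usage sketch for the composite config above: combine any vision encoder
# config with any text decoder config. ViT and BERT are example choices, not
# requirements of the class.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = BertConfig()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention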
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Return a min-max normalized copy of `data`, rounded to `ndigits`."""
    x_min = min(data)
    x_max = max(data)
    # normalize data onto [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Return a z-score standardized copy of `data`, rounded to `ndigits`."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
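# Worked example: for [1, 2, 3], min-max maps the range onto [0, 1], while
# z-scores (sample stdev = 1) are centered on the mean.
print(normalization([1, 2, 3]))    # [0.0, 0.5, 1.0]
print(standardization([1, 2, 3]))  # [-1.0, 0.0, 1.0]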
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
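# Direct-call sketch of the converter above, with hypothetical placeholder
# paths; substitute a real TF checkpoint, its config.json, and an output file.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="mobilebert/mobilebert_variables.ckpt",
    mobilebert_config_file="mobilebert/config.json",
    pytorch_dump_path="mobilebert/pytorch_model.bin",
)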
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") snake_case__ : List[str] = logging.getLogger(__name__) @dataclass class _a : """simple docstring""" A_ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) A_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class _a : """simple docstring""" A_ = field(default=UpperCAmelCase__ , metadata={"""help""": """The input training data file (a text file)."""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) A_ = field( default=UpperCAmelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """ """efficient on GPU but very bad for TPU.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) A_ = field( default=UpperCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def _UpperCAmelCase ( self ) -> Union[str, Any]: if self.train_file is not None: UpperCamelCase_ = self.train_file.split('.' 
)[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: UpperCamelCase_ = self.validation_file.split('.' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _a : """simple docstring""" A_ = 42 A_ = True A_ = None A_ = None def __call__( self , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = 'label' if 'label' in features[0].keys() else 'labels' UpperCamelCase_ = [feature.pop(_UpperCAmelCase ) for feature in features] UpperCamelCase_ = len(_UpperCAmelCase ) UpperCamelCase_ = len(features[0]['input_ids'] ) UpperCamelCase_ = [ [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features ] UpperCamelCase_ = list(chain(*_UpperCAmelCase ) ) UpperCamelCase_ = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten UpperCamelCase_ = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()} # Add back labels UpperCamelCase_ = torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) return batch def _snake_case (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , __lowercase , __lowercase) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCamelCase_ = training_args.get_process_log_level() logger.setLevel(__lowercase) datasets.utils.logging.set_verbosity(__lowercase) transformers.utils.logging.set_verbosity(__lowercase) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""") logger.info(f"""Training/evaluation parameters {training_args}""") # Detecting last checkpoint. 
UpperCamelCase_ = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase_ = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.') elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: UpperCamelCase_ = {} if data_args.train_file is not None: UpperCamelCase_ = data_args.train_file if data_args.validation_file is not None: UpperCamelCase_ = data_args.validation_file UpperCamelCase_ = data_args.train_file.split('.')[-1] UpperCamelCase_ = load_dataset( __lowercase , data_files=__lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. UpperCamelCase_ = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
UpperCamelCase_ = [f"""ending{i}""" for i in range(4)] UpperCamelCase_ = 'sent1' UpperCamelCase_ = 'sent2' if data_args.max_seq_length is None: UpperCamelCase_ = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.') UpperCamelCase_ = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""") UpperCamelCase_ = min(data_args.max_seq_length , tokenizer.model_max_length) # Preprocessing the datasets. def preprocess_function(__lowercase): UpperCamelCase_ = [[context] * 4 for context in examples[context_name]] UpperCamelCase_ = examples[question_header_name] UpperCamelCase_ = [ [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__lowercase) ] # Flatten out UpperCamelCase_ = list(chain(*__lowercase)) UpperCamelCase_ = list(chain(*__lowercase)) # Tokenize UpperCamelCase_ = tokenizer( __lowercase , __lowercase , truncation=__lowercase , max_length=__lowercase , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__lowercase) , 4)] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset') UpperCamelCase_ = raw_datasets['train'] if data_args.max_train_samples is not None: UpperCamelCase_ = min(len(__lowercase) , data_args.max_train_samples) UpperCamelCase_ = train_dataset.select(range(__lowercase)) with training_args.main_process_first(desc='train dataset map pre-processing'): UpperCamelCase_ = train_dataset.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset') UpperCamelCase_ = raw_datasets['validation'] if data_args.max_eval_samples is not None: UpperCamelCase_ = min(len(__lowercase) , data_args.max_eval_samples) UpperCamelCase_ = eval_dataset.select(range(__lowercase)) with training_args.main_process_first(desc='validation dataset map pre-processing'): UpperCamelCase_ = eval_dataset.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator UpperCamelCase_ = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__lowercase , pad_to_multiple_of=8 if training_args.fpaa else None) ) # Metric def compute_metrics(__lowercase): UpperCamelCase_ , UpperCamelCase_ = eval_predictions UpperCamelCase_ = np.argmax(__lowercase , axis=1) return {"accuracy": (preds == label_ids).astype(np.floataa).mean().item()} # Initialize our Trainer UpperCamelCase_ = Trainer( model=__lowercase , args=__lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , compute_metrics=__lowercase , ) # Training if training_args.do_train: UpperCamelCase_ = 
None if training_args.resume_from_checkpoint is not None: UpperCamelCase_ = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase_ = last_checkpoint UpperCamelCase_ = trainer.train(resume_from_checkpoint=__lowercase) trainer.save_model() # Saves the tokenizer too for easy upload UpperCamelCase_ = train_result.metrics UpperCamelCase_ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowercase) ) UpperCamelCase_ = min(__lowercase , len(__lowercase)) trainer.log_metrics('train' , __lowercase) trainer.save_metrics('train' , __lowercase) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***') UpperCamelCase_ = trainer.evaluate() UpperCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowercase) UpperCamelCase_ = min(__lowercase , len(__lowercase)) trainer.log_metrics('eval' , __lowercase) trainer.save_metrics('eval' , __lowercase) UpperCamelCase_ = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase) else: trainer.create_model_card(**__lowercase) def _snake_case (__lowercase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
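# Minimal sketch of the flatten/un-flatten trick in `preprocess_function`
# above: a batch of N questions with 4 candidate endings is tokenized as 4*N
# independent sequences, then regrouped per question. Toy strings stand in
# for real tokenized features.
contexts = ["Q1", "Q2"]
endings = [["a", "b", "c", "d"], ["e", "f", "g", "h"]]

flat_pairs = [(ctx, end) for ctx, ends in zip(contexts, endings) for end in ends]
assert len(flat_pairs) == 4 * len(contexts)

# ...tokenize flat_pairs as 8 independent sequences, then regroup per question:
grouped = [flat_pairs[i : i + 4] for i in range(0, len(flat_pairs), 4)]
assert len(grouped) == len(contexts)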
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _a ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_MASKED_LM_MAPPING A_ = TF_MODEL_FOR_MASKED_LM_MAPPING def _UpperCAmelCase ( self ) -> List[str]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 
2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() UpperCamelCase_ = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(_UpperCAmelCase ) @slow @require_tf def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.2_5_1, 'token': 2201, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.2_1_4, 'token': 12790, 'token_str': ' Lyon', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) @require_tf def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = fill_masker.tokenizer UpperCamelCase_ = fill_masker.model UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) 
self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , ) with self.assertRaises(_UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_UpperCAmelCase ): fill_masker('This is' ) self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase ) 
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = sorted(vocab.keys() )[:2] # Pipeline argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Call argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Score equivalence UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs] UpperCamelCase_ = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # top_k=2, ntargets=3 UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.get_vocab() # String duplicates + id duplicates UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]] UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_UpperCAmelCase ) , 3 ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , )
23
1
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left cell to the bottom-right cell,
    moving in the four cardinal directions and skipping blocked cells (marked 1)."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
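A minimal usage sketch for the path-counting DFS above; the demo grid is an illustrative assumption, not part of the original row.

# Hypothetical demo data: 0 = free cell, 1 = blocked cell.
demo_grid = [
    [0, 0, 0],
    [1, 1, 0],
    [0, 0, 0],
]
# Prints the number of distinct simple paths from (0, 0) to (2, 2).
print(depth_first_search(demo_grid, 0, 0, set()))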
23
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; the row only preserved the value

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
23
1
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Euclidean distance between two vectors of equal length
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query vector, find the dataset vector with the smallest
    Euclidean distance and return [nearest_vector, distance] pairs."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Cosine of the angle between two vectors
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
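A small usage sketch for similarity_search and cosine_similarity; the arrays below are illustrative assumptions, not part of the original row.

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.9, 1.1]])
# Nearest dataset vector (by Euclidean distance) for the single query vector.
print(similarity_search(dataset, value_array))  # -> [[[1.0, 1.0], 0.1414...]]
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071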
23
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
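A minimal usage sketch for the graph and prims_algo above; the three-node graph and the printed values are an illustrative, hand-checked assumption, not part of the original row.

g = GraphUndirectedWeighted()  # holds int nodes in this demo
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 3)
dist, parent = prims_algo(g)
# Note: the update rule accumulates dist[node] + edge weight, so dist holds
# path costs from the start node rather than single edge weights.
print(dist)    # {1: 0, 2: 1, 3: 3} for this graph
print(parent)  # {1: None, 2: 1, 3: 1}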
23
1
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ : Optional[Any] = logging.getLogger(__name__) @dataclass(frozen=UpperCAmelCase__ ) class _a : """simple docstring""" A_ = 42 A_ = 42 A_ = None A_ = None A_ = None @dataclass(frozen=UpperCAmelCase__ ) class _a : """simple docstring""" A_ = 42 A_ = None A_ = None A_ = None A_ = None if is_torch_available(): import torch from torch.utils.data import Dataset class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> Optional[Any]: UpperCamelCase_ = hans_processors[task]() UpperCamelCase_ = os.path.join( _UpperCAmelCase , 'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) , _UpperCAmelCase , ) , ) UpperCamelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1] UpperCamelCase_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCamelCase_ = cached_features_file + '.lock' with FileLock(_UpperCAmelCase ): if os.path.exists(_UpperCAmelCase ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCamelCase_ = torch.load(_UpperCAmelCase ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCamelCase_ = ( processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase ) ) logger.info('Training examples: %s' , len(_UpperCAmelCase ) ) UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) logger.info('Saving features into cached file %s' , _UpperCAmelCase ) torch.save(self.features , _UpperCAmelCase ) def __len__( self ) -> int: return len(self.features ) def __getitem__( self , _UpperCAmelCase ) -> InputFeatures: return self.features[i] def _UpperCAmelCase ( self ) -> List[Any]: return self.label_list if is_tf_available(): import tensorflow as tf class _a : """simple docstring""" A_ = 42 def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 128 , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> Dict: UpperCamelCase_ = hans_processors[task]() UpperCamelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1] UpperCamelCase_ = label_list UpperCamelCase_ = processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase ) UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples 
to features' ): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(_UpperCAmelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCamelCase_ = tf.data.Dataset.from_generator( _UpperCAmelCase , ( { 'example_id': tf.intaa, 'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa, }, tf.intaa, ) , ( { 'example_id': tf.TensorShape([] ), 'input_ids': tf.TensorShape([None, None] ), 'attention_mask': tf.TensorShape([None, None] ), 'token_type_ids': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _UpperCAmelCase ( self ) -> List[Any]: return self.dataset def __len__( self ) -> str: return len(self.features ) def __getitem__( self , _UpperCAmelCase ) -> InputFeatures: return self.features[i] def _UpperCAmelCase ( self ) -> List[str]: return self.label_list class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[Any]: return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_train_set.txt' ) ) , 'train' ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]: return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' ) def _UpperCAmelCase ( self ) -> List[str]: return ["contradiction", "entailment", "neutral"] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict: UpperCamelCase_ = [] for i, line in enumerate(_UpperCAmelCase ): if i == 0: continue UpperCamelCase_ = '%s-%s' % (set_type, line[0]) UpperCamelCase_ = line[5] UpperCamelCase_ = line[6] UpperCamelCase_ = line[7][2:] if line[7].startswith('ex' ) else line[7] UpperCamelCase_ = line[0] examples.append(InputExample(guid=_UpperCAmelCase , text_a=_UpperCAmelCase , text_b=_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) ) return examples def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , ): UpperCamelCase_ = {label: i for i, label in enumerate(__lowercase)} UpperCamelCase_ = [] for ex_index, example in tqdm.tqdm(enumerate(__lowercase) , desc='convert examples to features'): if ex_index % 10000 == 0: logger.info('Writing example %d' % (ex_index)) UpperCamelCase_ = tokenizer( example.text_a , example.text_b , add_special_tokens=__lowercase , max_length=__lowercase , padding='max_length' , truncation=__lowercase , return_overflowing_tokens=__lowercase , ) UpperCamelCase_ = label_map[example.label] if example.label in label_map else 0 UpperCamelCase_ = int(example.pairID) features.append(InputFeatures(**__lowercase , label=__lowercase , pairID=__lowercase)) for i, example in enumerate(examples[:5]): logger.info('*** Example ***') logger.info(f"""guid: {example}""") logger.info(f"""features: {features[i]}""") return features snake_case__ : str = { """hans""": 3, } snake_case__ : Optional[Any] = { """hans""": HansProcessor, }
23
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Move x to the front; evict the least recently used key when full
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
23
1
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Any = {"""vocab_file""": """vocab.txt"""} snake_case__ : int = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } snake_case__ : int = { """openbmb/cpm-ant-10b""": 1_0_2_4, } def _snake_case (__lowercase): UpperCamelCase_ = collections.OrderedDict() with open(__lowercase , 'r' , encoding='utf-8') as reader: UpperCamelCase_ = reader.readlines() for index, token in enumerate(__lowercase): UpperCamelCase_ = token.rstrip('\n') UpperCamelCase_ = index return vocab class _a ( UpperCAmelCase__ ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<unk>" , _UpperCAmelCase=200 ) -> Any: UpperCamelCase_ = vocab UpperCamelCase_ = unk_token UpperCamelCase_ = max_input_chars_per_word def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = list(_UpperCAmelCase ) if len(_UpperCAmelCase ) > self.max_input_chars_per_word: return [self.unk_token] UpperCamelCase_ = 0 UpperCamelCase_ = [] while start < len(_UpperCAmelCase ): UpperCamelCase_ = len(_UpperCAmelCase ) UpperCamelCase_ = None while start < end: UpperCamelCase_ = ''.join(chars[start:end] ) if substr in self.vocab: UpperCamelCase_ = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_UpperCAmelCase ) UpperCamelCase_ = end return sub_tokens class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ["""input_ids""", """attention_mask"""] A_ = False def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<d>" , _UpperCAmelCase="</d>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="</n>" , _UpperCAmelCase="</_>" , _UpperCAmelCase="left" , **_UpperCAmelCase , ) -> List[Any]: requires_backends(self , ['jieba'] ) super().__init__( bod_token=_UpperCAmelCase , eod_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , line_token=_UpperCAmelCase , space_token=_UpperCAmelCase , padding_side=_UpperCAmelCase , **_UpperCAmelCase , ) UpperCamelCase_ = bod_token UpperCamelCase_ = eod_token UpperCamelCase_ = load_vocab(_UpperCAmelCase ) UpperCamelCase_ = self.encoder[space_token] UpperCamelCase_ = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCamelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _UpperCAmelCase : x[1] ) ) UpperCamelCase_ = {v: k for k, v in self.encoder.items()} UpperCamelCase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _UpperCAmelCase ( self ) -> Optional[Any]: return self.encoder[self.bod_token] @property def _UpperCAmelCase ( self ) -> List[Any]: return self.encoder[self.eod_token] @property def _UpperCAmelCase ( self ) -> Any: return self.encoder["\n"] @property def _UpperCAmelCase ( self ) -> int: return len(self.encoder ) def _UpperCAmelCase ( self ) -> str: return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[int]: 
UpperCamelCase_ = [] for x in jieba.cut(_UpperCAmelCase , cut_all=_UpperCAmelCase ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_UpperCAmelCase ) ) return output_tokens def _UpperCAmelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = [i for i in token_ids if i >= 0] UpperCamelCase_ = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str: return token in self.encoder def _UpperCAmelCase ( self , _UpperCAmelCase ) -> str: return "".join(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> int: return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[Any]: return self.decoder.get(_UpperCAmelCase , self.unk_token ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]: if os.path.isdir(_UpperCAmelCase ): UpperCamelCase_ = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: UpperCamelCase_ = (filename_prefix + '-' if filename_prefix else '') + save_directory UpperCamelCase_ = 0 if " " in self.encoder: UpperCamelCase_ = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: UpperCamelCase_ = self.encoder['\n'] del self.encoder["\n"] UpperCamelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _UpperCAmelCase : x[1] ) ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' ) UpperCamelCase_ = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) return [1] + ([0] * len(_UpperCAmelCase ))
23
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Logistic function, applied element-wise
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU (swish) activation: x * sigmoid(x)
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
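A tiny check of the SiLU above on a sample vector (illustrative; values are approximate).

import numpy as np

print(sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])))
# ~ [-0.2689, 0.7311, 1.7616]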
23
1
def binary_recursive(decimal: int) -> str:
    """Take a positive integer value and return its binary equivalent as a string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def decimal_to_binary(number: str) -> str:
    """Validate the input (sign and digits), then delegate to binary_recursive."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
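A quick demo of the conversion wrapper; the wrapper name decimal_to_binary is a reconstruction, since the original row anonymized it.

print(decimal_to_binary("12"))   # -> 0b1100
print(decimal_to_binary("-12"))  # -> -0b1100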
23
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
23
1
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
23
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
23
1
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _a : """simple docstring""" A_ = PegasusConfig A_ = {} A_ = """gelu""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=40 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Tuple: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = eos_token_id UpperCamelCase_ = pad_token_id UpperCamelCase_ = bos_token_id def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase_ = prepare_pegasus_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = TFPegasusModel(config=_UpperCAmelCase ).get_decoder() UpperCamelCase_ = inputs_dict['input_ids'] UpperCamelCase_ = input_ids[:1, :] UpperCamelCase_ = inputs_dict['attention_mask'][:1, :] UpperCamelCase_ = inputs_dict['head_mask'] UpperCamelCase_ = 1 # first forward pass UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCamelCase_ = 
tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCamelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCamelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx] UpperCamelCase_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 ) def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ): if attention_mask is None: UpperCamelCase_ = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id) , tf.inta) if decoder_attention_mask is None: UpperCamelCase_ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta), ] , axis=-1 , ) if head_mask is None: UpperCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () A_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else () A_ = ( { """conversational""": TFPegasusForConditionalGeneration, """feature-extraction""": TFPegasusModel, """summarization""": TFPegasusForConditionalGeneration, """text2text-generation""": TFPegasusForConditionalGeneration, """translation""": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) A_ = True A_ = False A_ = False def _UpperCAmelCase ( self ) -> Any: UpperCamelCase_ = TFPegasusModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _a ( unittest.TestCase ): """simple docstring""" A_ = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. 
I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] A_ = [ """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to""" """ reduce the risk of wildfires.""", """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""", ] # differs slightly from pytorch, likely due to numerical differences in linear layers A_ = """google/pegasus-xsum""" @cached_property def _UpperCAmelCase ( self ) -> Dict: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = self.translate_src_text(**_UpperCAmelCase ) assert self.expected_text == generated_words def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = self.tokenizer(self.src_text , **_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='tf' ) UpperCamelCase_ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_UpperCAmelCase , ) UpperCamelCase_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def _UpperCAmelCase ( self ) -> Optional[Any]: self._assert_generated_batch_equal_expected()
23
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):  # class/method names reconstructed from context
    def _no_encoding_on_file_open(self, filepath: str):
        # Match open(...) calls that pass neither an encoding nor a binary mode.
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
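A standalone illustration of the open-without-encoding regex used above; the sample strings are hypothetical.

import re

regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
print(bool(regexp.search(" open('data.txt')")))                      # True: no encoding given
print(bool(regexp.search(" open('data.txt', encoding='utf-8')")))    # False: encoding present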
23
1
from math import factorial


def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid, i.e. the
    central binomial coefficient C(2n, n)."""
    n = 2 * n
    # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
23
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _snake_case (__lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ): set_seed(3) # generate train_data and objective_set UpperCamelCase_ , UpperCamelCase_ = generate_datasets( __lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase) # keeps model same across runs set_seed(4) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # load pretrained model UpperCamelCase_ = load_gpta('gpt2').to(__lowercase) print('computing perplexity on objective set') UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item() print('perplexity on objective set:' , __lowercase) # collect igf pairs and save to file demo.jbl collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _snake_case (__lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ): set_seed(42) # Load pre-trained model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') # Initialize secondary learner to use embedding weights of model UpperCamelCase_ = SecondaryLearner(__lowercase) # Train secondary learner UpperCamelCase_ = train_secondary_learner( __lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ): UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') UpperCamelCase_ = RandomSampler(__lowercase) UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase) UpperCamelCase_ = max_steps // (len(__lowercase)) + 1 UpperCamelCase_ = 0 UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase) model.train() if secondary_learner is not None: secondary_learner.to(__lowercase) secondary_learner.eval() UpperCamelCase_ = [] UpperCamelCase_ = 0 UpperCamelCase_ = [] UpperCamelCase_ = [] # Compute the performance of the transformer model at the beginning UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) for epoch in range(int(__lowercase)): for step, example in enumerate(__lowercase): torch.cuda.empty_cache() UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1) UpperCamelCase_ = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
UpperCamelCase_ = model(__lowercase , labels=__lowercase) UpperCamelCase_ = True if secondary_learner is not None: UpperCamelCase_ = secondary_learner.forward( torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item() observed_qs.append(float(__lowercase)) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase_ = -1 if predicted_q < threshold: UpperCamelCase_ = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu())) UpperCamelCase_ = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase_ = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __lowercase) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _snake_case (): UpperCamelCase_ = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task') # Required parameters parser.add_argument( '--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , ) parser.add_argument( '--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--data_file' , type=__lowercase , default=__lowercase , help=( 'A jbl file containing tokenized data which can be split as objective dataset, ' 'train_dataset and test_dataset.' ) , ) parser.add_argument( '--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , ) parser.add_argument( '--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , ) parser.add_argument( '--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.') parser.add_argument( '--context_len' , default=32 , type=__lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) , ) parser.add_argument( '--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , ) parser.add_argument( '--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq') parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs') parser.add_argument( '--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , ) parser.add_argument( '--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ') parser.add_argument( '--eval_interval' , default=10 , type=__lowercase , help=( 'decay the selectivity of our secondary learner filter from' '1 standard deviation above average to 1 below average after 10 batches' ) , ) parser.add_argument( '--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data') parser.add_argument( '--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set') parser.add_argument( '--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner') parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length') parser.add_argument( '--threshold' , default=1.0 , type=__lowercase , help=( 'The threshold value used by secondary learner to filter the train_data and allow only' ' informative data as input to the model' ) , ) parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name') parser.add_argument( '--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , ) # Load train data for secondary learner UpperCamelCase_ = joblib.load('data/IGF_values.jbl') # Train secondary learner UpperCamelCase_ = training_secondary_learner( __lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , ) # load pretrained gpt2 model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') set_seed(42) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase_ , UpperCamelCase_ = generate_datasets( context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , ) if __name__ == "__main__": main()
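
# --- Added illustration (not from the original script) ---
# The filtering rule above keeps a training context only while the secondary learner's
# predicted information gain clears a threshold that is relaxed after a warm-up period.
# A minimal, self-contained sketch of that schedule; the 10-batch warm-up and the
# 1.0 -> -1.0 endpoints mirror the hard-coded values in the loop above.
def igf_keep_context(predicted_q: float, global_step: int, warmup_batches: int = 10) -> bool:
    threshold = 1.0 if global_step < warmup_batches else -1.0  # decayed selectivity
    return predicted_q >= threshold


assert igf_keep_context(0.5, global_step=3) is False   # early: filter aggressively
assert igf_keep_context(0.5, global_step=12) is True   # later: accept almost everything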
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask snake_case__ : Any = logging.getLogger(__name__) class _a ( UpperCAmelCase__ ): """simple docstring""" def __init__( self , _UpperCAmelCase=-1 ) -> str: # in NER datasets, the last column is usually reserved for NER label UpperCamelCase_ = label_idx def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[InputExample]: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCamelCase_ = mode.value UpperCamelCase_ = os.path.join(_UpperCAmelCase , f"""{mode}.txt""" ) UpperCamelCase_ = 1 UpperCamelCase_ = [] with open(_UpperCAmelCase , encoding='utf-8' ) as f: UpperCamelCase_ = [] UpperCamelCase_ = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_UpperCAmelCase , labels=_UpperCAmelCase ) ) guid_index += 1 UpperCamelCase_ = [] UpperCamelCase_ = [] else: UpperCamelCase_ = line.split(' ' ) words.append(splits[0] ) if len(_UpperCAmelCase ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_UpperCAmelCase , labels=_UpperCAmelCase ) ) return examples def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(_UpperCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: UpperCamelCase_ = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(_UpperCAmelCase ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' 
, line.split()[0] ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]: if path: with open(_UpperCAmelCase , 'r' ) as f: UpperCamelCase_ = f.read().splitlines() if "O" not in labels: UpperCamelCase_ = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class _a ( UpperCAmelCase__ ): """simple docstring""" def __init__( self ) -> Optional[int]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]: if path: with open(_UpperCAmelCase , 'r' ) as f: UpperCamelCase_ = f.read().splitlines() if "O" not in labels: UpperCamelCase_ = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class _a ( UpperCAmelCase__ ): """simple docstring""" def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[InputExample]: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCamelCase_ = mode.value UpperCamelCase_ = os.path.join(_UpperCAmelCase , f"""{mode}.txt""" ) UpperCamelCase_ = 1 UpperCamelCase_ = [] with open(_UpperCAmelCase , encoding='utf-8' ) as f: for sentence in parse_incr(_UpperCAmelCase ): UpperCamelCase_ = [] UpperCamelCase_ = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_UpperCAmelCase , labels=_UpperCAmelCase ) ) guid_index += 1 return examples def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str: UpperCamelCase_ = 0 for sentence in parse_incr(_UpperCAmelCase ): UpperCamelCase_ = preds_list[example_id] UpperCamelCase_ = '' for token in sentence: out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """ out += "\n" writer.write(_UpperCAmelCase ) example_id += 1 def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]: if path: with open(_UpperCAmelCase , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
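
# --- Added illustration (not from the original file) ---
# The example reader above consumes whitespace-separated CoNLL lines; this stand-alone
# helper parses the same format so the expected input shape is easy to see.
def parse_conll(lines, label_idx=-1):
    sentences, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:
                sentences.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split(" ")
            words.append(splits[0])
            labels.append(splits[label_idx].rstrip("\n") if len(splits) > 1 else "O")
    if words:
        sentences.append((words, labels))
    return sentences


demo = ["EU B-ORG\n", "rejects O\n", "\n", "Peter B-PER\n", "Blackburn I-PER\n"]
print(parse_conll(demo))  # [(['EU', 'rejects'], ['B-ORG', 'O']), (['Peter', 'Blackburn'], ['B-PER', 'I-PER'])]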
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _a : """simple docstring""" A_ = MBartConfig A_ = {} A_ = """gelu""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Union[str, Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = eos_token_id UpperCamelCase_ = pad_token_id UpperCamelCase_ = bos_token_id def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase_ = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = TFMBartModel(config=_UpperCAmelCase ).get_decoder() UpperCamelCase_ = inputs_dict['input_ids'] UpperCamelCase_ = input_ids[:1, :] UpperCamelCase_ = inputs_dict['attention_mask'][:1, :] UpperCamelCase_ = inputs_dict['head_mask'] UpperCamelCase_ = 1 # first forward pass UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple() UpperCamelCase_ = past_key_values[1] def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ): if attention_mask is None: UpperCamelCase_ = 
tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id) , tf.inta) if decoder_attention_mask is None: UpperCamelCase_ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta), ] , axis=-1 , ) if head_mask is None: UpperCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () A_ = (TFMBartForConditionalGeneration,) if is_tf_available() else () A_ = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) A_ = True A_ = False A_ = False def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = TFMBartModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _a ( unittest.TestCase ): """simple docstring""" A_ = [ """ UN Chief Says There Is No Military Solution in Syria""", ] A_ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] A_ = """facebook/mbart-large-en-ro""" @cached_property def _UpperCAmelCase ( self ) -> Any: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> int: UpperCamelCase_ = self.translate_src_text(**_UpperCAmelCase ) self.assertListEqual(self.expected_text , _UpperCAmelCase ) def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> List[str]: UpperCamelCase_ = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' ) UpperCamelCase_ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) UpperCamelCase_ = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def _UpperCAmelCase ( self ) -> List[Any]: self._assert_generated_batch_equal_expected()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
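
# --- Added illustration (not transformers' actual _LazyModule) ---
# A minimal sketch of the lazy-import pattern used above: nothing under the package is
# imported until an exported attribute is first accessed, then the result is cached.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {a: mod for mod, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value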
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
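
# --- Added sanity check (not part of the original file) ---
# The digit sum of 100! is the published Project Euler 20 answer (648), and summing
# the digits of str(math.factorial(100)) must agree with split_and_add.
import math

assert solution(100) == sum(int(d) for d in str(math.factorial(100))) == 648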
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def _snake_case (__lowercase , __lowercase): UpperCamelCase_ = checkpoint UpperCamelCase_ = {} UpperCamelCase_ = vae_state_dict['encoder.conv_in.weight'] UpperCamelCase_ = vae_state_dict['encoder.conv_in.bias'] UpperCamelCase_ = vae_state_dict['encoder.conv_out.weight'] UpperCamelCase_ = vae_state_dict['encoder.conv_out.bias'] UpperCamelCase_ = vae_state_dict['encoder.norm_out.weight'] UpperCamelCase_ = vae_state_dict['encoder.norm_out.bias'] UpperCamelCase_ = vae_state_dict['decoder.conv_in.weight'] UpperCamelCase_ = vae_state_dict['decoder.conv_in.bias'] UpperCamelCase_ = vae_state_dict['decoder.conv_out.weight'] UpperCamelCase_ = vae_state_dict['decoder.conv_out.bias'] UpperCamelCase_ = vae_state_dict['decoder.norm_out.weight'] UpperCamelCase_ = vae_state_dict['decoder.norm_out.bias'] UpperCamelCase_ = vae_state_dict['quant_conv.weight'] UpperCamelCase_ = vae_state_dict['quant_conv.bias'] UpperCamelCase_ = vae_state_dict['post_quant_conv.weight'] UpperCamelCase_ = vae_state_dict['post_quant_conv.bias'] # Retrieves the keys for the encoder down blocks only UpperCamelCase_ = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer}) UpperCamelCase_ = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowercase) } # Retrieves the keys for the decoder up blocks only UpperCamelCase_ = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer}) UpperCamelCase_ = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowercase) } for i in range(__lowercase): UpperCamelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: UpperCamelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""") UpperCamelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""") UpperCamelCase_ = renew_vae_resnet_paths(__lowercase) UpperCamelCase_ = {'old': f"""down.{i}.block""", 'new': f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase) UpperCamelCase_ = [key for key in vae_state_dict if 'encoder.mid.block' in key] UpperCamelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1): UpperCamelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] UpperCamelCase_ = renew_vae_resnet_paths(__lowercase) UpperCamelCase_ = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase) UpperCamelCase_ = [key for key in vae_state_dict if 'encoder.mid.attn' in key] UpperCamelCase_ = renew_vae_attention_paths(__lowercase) UpperCamelCase_ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase) conv_attn_to_linear(__lowercase) for i in range(__lowercase): UpperCamelCase_ = num_up_blocks - 1 - i UpperCamelCase_ = [ key for key in 
up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: UpperCamelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] UpperCamelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] UpperCamelCase_ = renew_vae_resnet_paths(__lowercase) UpperCamelCase_ = {'old': f"""up.{block_id}.block""", 'new': f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase) UpperCamelCase_ = [key for key in vae_state_dict if 'decoder.mid.block' in key] UpperCamelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1): UpperCamelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] UpperCamelCase_ = renew_vae_resnet_paths(__lowercase) UpperCamelCase_ = {'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase) UpperCamelCase_ = [key for key in vae_state_dict if 'decoder.mid.attn' in key] UpperCamelCase_ = renew_vae_attention_paths(__lowercase) UpperCamelCase_ = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(__lowercase , __lowercase , __lowercase , additional_replacements=[meta_path] , config=__lowercase) conv_attn_to_linear(__lowercase) return new_checkpoint def _snake_case (__lowercase , __lowercase , ): # Only support V1 UpperCamelCase_ = requests.get( ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml') UpperCamelCase_ = io.BytesIO(r.content) UpperCamelCase_ = OmegaConf.load(__lowercase) UpperCamelCase_ = 512 UpperCamelCase_ = 'cuda' if torch.cuda.is_available() else 'cpu' if checkpoint_path.endswith('safetensors'): from safetensors import safe_open UpperCamelCase_ = {} with safe_open(__lowercase , framework='pt' , device='cpu') as f: for key in f.keys(): UpperCamelCase_ = f.get_tensor(__lowercase) else: UpperCamelCase_ = torch.load(__lowercase , map_location=__lowercase)['state_dict'] # Convert the VAE model. UpperCamelCase_ = create_vae_diffusers_config(__lowercase , image_size=__lowercase) UpperCamelCase_ = custom_convert_ldm_vae_checkpoint(__lowercase , __lowercase) UpperCamelCase_ = AutoencoderKL(**__lowercase) vae.load_state_dict(__lowercase) vae.save_pretrained(__lowercase) if __name__ == "__main__": snake_case__ : int = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") snake_case__ : Union[str, Any] = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
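
# --- Added usage note (not part of the original file) ---
# The converter above is driven from the command line; a typical invocation would be
# (script filename and paths are placeholders, not taken from the dump):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.pt \
#       --dump_path /path/to/diffusers_vae_output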
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case__ : str = logging.get_logger(__name__) def _snake_case (__lowercase): if isinstance(__lowercase , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]): return videos elif isinstance(__lowercase , (list, tuple)) and is_valid_image(videos[0]): return [videos] elif is_valid_image(__lowercase): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""") class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = ["""pixel_values"""] def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None: super().__init__(**_UpperCAmelCase ) UpperCamelCase_ = size if size is not None else {'shortest_edge': 224} UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' ) UpperCamelCase_ = do_resize UpperCamelCase_ = size UpperCamelCase_ = do_center_crop UpperCamelCase_ = crop_size UpperCamelCase_ = resample UpperCamelCase_ = do_rescale UpperCamelCase_ = rescale_factor UpperCamelCase_ = do_normalize UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" in size: UpperCamelCase_ = get_resize_output_image_size(_UpperCAmelCase , size['shortest_edge'] , default_to_square=_UpperCAmelCase ) elif "height" in size and "width" in size: UpperCamelCase_ = (size['height'], size['width']) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: UpperCamelCase_ = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> int: return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray: return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. UpperCamelCase_ = to_numpy_array(_UpperCAmelCase ) if do_resize: UpperCamelCase_ = self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) if do_center_crop: UpperCamelCase_ = self.center_crop(_UpperCAmelCase , size=_UpperCAmelCase ) if do_rescale: UpperCamelCase_ = self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) if do_normalize: UpperCamelCase_ = self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) UpperCamelCase_ = to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) return image def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> PIL.Image.Image: UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize UpperCamelCase_ = resample if resample is not None else self.resample UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean UpperCamelCase_ = image_std if image_std is not None else self.image_std UpperCamelCase_ = size if size is not None else self.size UpperCamelCase_ = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size UpperCamelCase_ = get_size_dict(_UpperCAmelCase , param_name='crop_size' ) if not valid_images(_UpperCAmelCase ): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) UpperCamelCase_ = make_batched(_UpperCAmelCase ) UpperCamelCase_ = [ [ self._preprocess_image( image=_UpperCAmelCase , do_resize=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , do_center_crop=_UpperCAmelCase , crop_size=_UpperCAmelCase , do_rescale=_UpperCAmelCase , rescale_factor=_UpperCAmelCase , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , data_format=_UpperCAmelCase , ) for img in video ] for video in videos ] UpperCamelCase_ = {'pixel_values': videos} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
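
# --- Added usage sketch (not part of the original file) ---
# In the un-obfuscated library this preprocessor corresponds to a video image processor
# such as transformers' VideoMAEImageProcessor; the class name and expected shapes below
# are drawn from that public API and are assumptions, not taken from the dump.
import numpy as np
from transformers import VideoMAEImageProcessor

processor = VideoMAEImageProcessor(size={"shortest_edge": 224})
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]  # 8 RGB frames
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)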
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
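
# --- Added cross-check (not part of the original file) ---
# Recompute the longest path (counted in vertices) by exhaustive DFS and compare with
# the Kahn-style relaxation above; both should report 5 for the demo graph.
def longest_path_dfs(g, v):
    return 1 + max((longest_path_dfs(g, w) for w in g[v]), default=0)


assert max(longest_path_dfs(graph, v) for v in graph) == 5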
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 A_ = 42 class _a ( UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" A_ = 1 @register_to_config def __init__( self , _UpperCAmelCase = 2000 , _UpperCAmelCase = 0.1_5 , _UpperCAmelCase = 0.0_1 , _UpperCAmelCase = 1_3_4_8.0 , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = 1 , ) -> Tuple: # standard deviation of the initial noise distribution UpperCamelCase_ = sigma_max # setable values UpperCamelCase_ = None self.set_sigmas(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ) -> torch.FloatTensor: return sample def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> str: UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps UpperCamelCase_ = torch.linspace(1 , _UpperCAmelCase , _UpperCAmelCase , device=_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None ) -> Any: UpperCamelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min UpperCamelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max UpperCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) UpperCamelCase_ = torch.exp(torch.linspace(math.log(_UpperCAmelCase ) , math.log(_UpperCAmelCase ) , _UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SdeVeOutput, Tuple]: if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) UpperCamelCase_ = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) UpperCamelCase_ = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda UpperCamelCase_ = timesteps.to(self.discrete_sigmas.device ) UpperCamelCase_ = self.discrete_sigmas[timesteps].to(sample.device ) UpperCamelCase_ = self.get_adjacent_sigma(_UpperCAmelCase , _UpperCAmelCase ).to(sample.device ) UpperCamelCase_ = torch.zeros_like(_UpperCAmelCase ) UpperCamelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods UpperCamelCase_ = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): UpperCamelCase_ = 
diffusion.unsqueeze(-1 ) UpperCamelCase_ = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of UpperCamelCase_ = randn_tensor( sample.shape , layout=sample.layout , generator=_UpperCAmelCase , device=sample.device , dtype=sample.dtype ) UpperCamelCase_ = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? UpperCamelCase_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=_UpperCAmelCase , prev_sample_mean=_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]: if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction UpperCamelCase_ = randn_tensor(sample.shape , layout=sample.layout , generator=_UpperCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr UpperCamelCase_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() UpperCamelCase_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() UpperCamelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 UpperCamelCase_ = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term UpperCamelCase_ = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): UpperCamelCase_ = step_size.unsqueeze(-1 ) UpperCamelCase_ = sample + step_size * model_output UpperCamelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCamelCase_ = timesteps.to(original_samples.device ) UpperCamelCase_ = self.discrete_sigmas.to(original_samples.device )[timesteps] UpperCamelCase_ = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(_UpperCAmelCase ) * sigmas[:, None, None, None] ) UpperCamelCase_ = noise + original_samples return noisy_samples def __len__( self ) -> Optional[int]: return self.config.num_train_timesteps
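
# --- Added usage sketch (not part of the original file) ---
# In diffusers this scheduler matches the public ScoreSdeVeScheduler; the names and
# call signatures below come from that API and are an assumption, not from the dump.
import torch
from diffusers import ScoreSdeVeScheduler

sched = ScoreSdeVeScheduler()
sched.set_timesteps(num_inference_steps=10)
sched.set_sigmas(num_inference_steps=10)
sample = torch.randn(1, 3, 32, 32) * sched.init_noise_sigma
score = torch.zeros_like(sample)  # stand-in for a score network's output
out = sched.step_pred(score, sched.timesteps[0], sample, generator=torch.manual_seed(0))
print(out.prev_sample.shape)  # torch.Size([1, 3, 32, 32])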
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
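
# --- Added sanity checks (not part of the original file) ---
# Known small values of the nth-prime function plus a few primality spot checks
# (9409 = 97**2 exercises the 6k +/- 1 loop).
assert [solution(i) for i in (1, 2, 3, 6)] == [2, 3, 5, 13]
assert is_prime(97) and not is_prime(1) and not is_prime(9409)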
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(1 for power in powers for base in bases if len(str(base**power)) == power)


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
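
# --- Added sanity check (not part of the original file) ---
# Project Euler 63 counts n-digit positive integers that are also nth powers; the
# published answer for the default bounds is 49.
assert solution() == 49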
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels
in 510 commercial legal contracts that have been manually labeled to identify 41 categories
of important clauses that lawyers look for when reviewing contracts in connection with
corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
          Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class _a ( datasets.BeamBasedBuilder ): """simple docstring""" def _UpperCAmelCase ( self ) -> List[str]: return datasets.DatasetInfo( features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=_UpperCAmelCase , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase ) class _a ( datasets.BeamBasedBuilder ): """simple docstring""" def _UpperCAmelCase ( self ) -> Any: return datasets.DatasetInfo( features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=_UpperCAmelCase , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} ) ] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase ) def _snake_case (): return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])] def _snake_case (): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])] class _a ( UpperCAmelCase__ ): """simple docstring""" @require_beam def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _UpperCAmelCase ( self ) -> List[str]: import apache_beam as beam UpperCamelCase_ = beam.io.parquetio.WriteToParquet UpperCamelCase_ = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock: UpperCamelCase_ = partial(_UpperCAmelCase , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertTrue( os.path.exists( os.path.join( _UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) 
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _UpperCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = DummyBeamDataset(cache_dir=_UpperCAmelCase ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCamelCase_ = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) ) UpperCamelCase_ = builder.as_dataset() self.assertEqual(dset['train'].num_rows , _UpperCAmelCase ) self.assertEqual(dset['train'].info.splits['train'].num_examples , _UpperCAmelCase ) self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_UpperCAmelCase , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
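
# --- Added usage examples (not part of the original file); the IDs are synthetic ---
assert is_spain_national_id("12345678Z")      # 12345678 % 23 == 14 -> 'Z'
assert is_spain_national_id("12345678-Z")     # hyphens are stripped before checking
assert not is_spain_national_id("12345678A")  # wrong check letter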
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
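
# --- Added usage note (not part of the original file) ---
# Typical command-line invocation (the script filename is the conventional one for this
# converter; the paths are placeholders):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/albert/model.ckpt-best \
#       --albert_config_file /path/to/albert/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin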
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
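
# --- Added cross-check (not part of the original file) ---
# Regenerate the series with a heap and compare; both must start 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
import heapq


def hamming_heap(n: int) -> list:
    heap, seen, out = [1], {1}, []
    while len(out) < n:
        x = heapq.heappop(heap)
        out.append(x)
        for m in (2 * x, 3 * x, 5 * x):
            if m not in seen:
                seen.add(m)
                heapq.heappush(heap, m)
    return out


assert hamming(10) == hamming_heap(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]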
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class _a ( UpperCAmelCase__ ): """simple docstring""" @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' ) UpperCamelCase_ = bertabert.config.encoder.vocab_size UpperCamelCase_ = tokenizer.sep_token_id UpperCamelCase_ = tokenizer.cls_token_id UpperCamelCase_ = 128 UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) UpperCamelCase_ = train_dataset.select(range(32 ) ) UpperCamelCase_ = val_dataset.select(range(16 ) ) UpperCamelCase_ = 4 def _map_to_encoder_decoder_inputs(_UpperCAmelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 ) UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 ) UpperCamelCase_ = inputs.input_ids UpperCamelCase_ = inputs.attention_mask UpperCamelCase_ = outputs.input_ids UpperCamelCase_ = outputs.input_ids.copy() UpperCamelCase_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] UpperCamelCase_ = outputs.attention_mask assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids ) assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(_UpperCAmelCase ): UpperCamelCase_ = pred.label_ids UpperCamelCase_ = pred.predictions # all unnecessary tokens are removed UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase ) return {"accuracy": accuracy} # map train dataset UpperCamelCase_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset UpperCamelCase_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) UpperCamelCase_ = self.get_auto_remove_tmp_dir() UpperCamelCase_ = SeqaSeqTrainingArguments( output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer UpperCamelCase_ = SeqaSeqTrainer( model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , 
train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , ) # start training trainer.train()
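The label mapping in the test above replaces pad-token ids with -100 because PyTorch's cross-entropy loss ignores that index by default, so padded positions never contribute to the loss. A minimal illustration (assumes torch is installed):

import torch

logits = torch.randn(3, 5)
labels = torch.tensor([1, 2, -100])  # the third position is masked out of the loss
loss = torch.nn.functional.cross_entropy(logits, labels)  # ignore_index defaults to -100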
23
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
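The `_import_structure` / `_LazyModule` pattern above defers the heavy framework imports until an exported name is actually accessed. A minimal sketch of the underlying idea (hypothetical `LazyModule` class, not the transformers implementation):

import importlib

class LazyModule:
    def __init__(self, name_to_module: dict):
        # Maps an exported name to the dotted path of the module that defines it.
        self._name_to_module = name_to_module

    def __getattr__(self, name: str):
        # Import happens only here, on first attribute access.
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)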
23
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ : Dict = 1_6 snake_case__ : List[str] = 3_2 def _snake_case (__lowercase , __lowercase = 16): UpperCamelCase_ = AutoTokenizer.from_pretrained('bert-base-cased') UpperCamelCase_ = load_dataset('glue' , 'mrpc') def tokenize_function(__lowercase): # max_length=None => use the model max length (it's actually the default) UpperCamelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__lowercase , max_length=__lowercase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCamelCase_ = datasets.map( __lowercase , batched=__lowercase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCamelCase_ = tokenized_datasets.rename_column('label' , 'labels') def collate_fn(__lowercase): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCamelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCamelCase_ = 16 elif accelerator.mixed_precision != "no": UpperCamelCase_ = 8 else: UpperCamelCase_ = None return tokenizer.pad( __lowercase , padding='longest' , max_length=__lowercase , pad_to_multiple_of=__lowercase , return_tensors='pt' , ) # Instantiate dataloaders. 
UpperCamelCase_ = DataLoader( tokenized_datasets['train'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase) UpperCamelCase_ = DataLoader( tokenized_datasets['validation'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case__ : List[str] = mocked_dataloaders # noqa: F811 def _snake_case (__lowercase , __lowercase): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , __lowercase) == "1": UpperCamelCase_ = 2 # New Code # UpperCamelCase_ = int(args.gradient_accumulation_steps) # Initialize accelerator UpperCamelCase_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowercase) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( 'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`') # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCamelCase_ = config['lr'] UpperCamelCase_ = int(config['num_epochs']) UpperCamelCase_ = int(config['seed']) UpperCamelCase_ = int(config['batch_size']) UpperCamelCase_ = evaluate.load('glue' , 'mrpc') set_seed(__lowercase) UpperCamelCase_ , UpperCamelCase_ = get_dataloaders(__lowercase , __lowercase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCamelCase_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__lowercase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCamelCase_ = model.to(accelerator.device) # Instantiate optimizer UpperCamelCase_ = AdamW(params=model.parameters() , lr=__lowercase) # Instantiate scheduler UpperCamelCase_ = get_linear_schedule_with_warmup( optimizer=__lowercase , num_warmup_steps=100 , num_training_steps=(len(__lowercase) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = accelerator.prepare( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) # Now we train the model for epoch in range(__lowercase): model.train() for step, batch in enumerate(__lowercase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__lowercase): UpperCamelCase_ = model(**__lowercase) UpperCamelCase_ = output.loss accelerator.backward(__lowercase) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowercase): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): UpperCamelCase_ = model(**__lowercase) UpperCamelCase_ = outputs.logits.argmax(dim=-1) UpperCamelCase_ , UpperCamelCase_ = accelerator.gather_for_metrics((predictions, batch['labels'])) metric.add_batch( predictions=__lowercase , references=__lowercase , ) UpperCamelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __lowercase) def _snake_case (): UpperCamelCase_ = argparse.ArgumentParser(description='Simple example of training script.') parser.add_argument( '--mixed_precision' , type=__lowercase , default=__lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=__lowercase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.') UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(__lowercase , __lowercase) if __name__ == "__main__": main()
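The gradient-accumulation core of the script above, in isolation (a minimal sketch assuming `model`, `optimizer`, and `train_dataloader` have already been passed through `accelerator.prepare`):

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
for batch in train_dataloader:
    with accelerator.accumulate(model):
        # Gradients are synchronized and applied only every 4th step;
        # the intermediate steps skip the expensive all-reduce.
        loss = model(**batch).loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()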
23
1
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
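A quick illustrative check of the sieve (not part of the original file), using the reconstructed names above:

assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(2) == [2]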
23
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _a : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=10 , _UpperCAmelCase=3 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=32 * 8 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , ) -> List[Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = is_training UpperCamelCase_ = use_auxiliary_loss UpperCamelCase_ = num_queries UpperCamelCase_ = num_channels UpperCamelCase_ = min_size UpperCamelCase_ = max_size UpperCamelCase_ = num_labels UpperCamelCase_ = hidden_dim UpperCamelCase_ = hidden_dim def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _UpperCAmelCase ) UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase ) UpperCamelCase_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5 ).float() UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long() UpperCamelCase_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = MaskaFormerConfig( hidden_size=self.hidden_dim , ) UpperCamelCase_ = self.num_queries UpperCamelCase_ = self.num_labels UpperCamelCase_ = [1, 1, 1, 1] UpperCamelCase_ = self.num_channels UpperCamelCase_ = 64 UpperCamelCase_ = 128 UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim UpperCamelCase_ = self.hidden_dim return config def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = output.encoder_hidden_states UpperCamelCase_ = output.pixel_decoder_hidden_states UpperCamelCase_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any: with torch.no_grad(): UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) 
self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() def comm_check_on_output(_UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) UpperCamelCase_ = model( pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) comm_check_on_output(_UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () A_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} A_ = False A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t 
work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> int: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = (self.model_tester.min_size,) * 2 UpperCamelCase_ = { 'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ), 'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ), 'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(), } UpperCamelCase_ = self.model_tester.get_config() UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _UpperCAmelCase ( self ) -> List[Any]: if not self.model_tester.is_training: return UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss loss.backward() def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = self.all_model_classes[1] UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() UpperCamelCase_ = True UpperCamelCase_ = True UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase ) model.train() UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ) UpperCamelCase_ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() UpperCamelCase_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) snake_case__ : List[Any] = 1E-4 def _snake_case (): UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_vision @slow class _a ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return "facebook/mask2former-swin-small-coco-instance" @cached_property def _UpperCAmelCase ( self ) -> List[str]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ) UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) UpperCamelCase_ = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) UpperCamelCase_ = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) UpperCamelCase_ = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) ) with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) # masks_queries_logits UpperCamelCase_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) UpperCamelCase_ = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) # class_queries_logits UpperCamelCase_ = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) UpperCamelCase_ = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], 
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval() UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase ) UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']] UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']] with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
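The integration assertions above all follow the same pattern: compare a small slice of model output against hard-coded reference values within an absolute tolerance (the 1e-4 constant defined in this file). In isolation:

import torch

expected = torch.tensor([[-0.2790, -1.0717, -1.1668]])
actual = expected + 5e-5  # within the 1e-4 tolerance used above
assert torch.allclose(actual, expected, atol=1e-4)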
23
1
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
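Illustrative usage of the structure above (assumes the reconstructed names): because the constructor re-sorts its input, merging two sorted lists stays sorted.

merged = merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2, 0]))
assert list(merged) == [0, 1, 2, 3]
assert str(merged) == "0 -> 1 -> 2 -> 3"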
23
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
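Typical composite-config construction via the classmethod above (ViT/BERT chosen purely for illustration):

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention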
23
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
23
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
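An example invocation of the conversion script (the script filename is indicative and all paths are placeholders):

# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#     --mobilebert_config_file /path/to/mobilebert/config.json \
#     --pytorch_dump_path /path/to/output/pytorch_model.bin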
23
1
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py snake_case__ : int = """src/transformers""" snake_case__ : Dict = """docs/source/en/tasks""" def _snake_case (__lowercase , __lowercase , __lowercase): with open(__lowercase , 'r' , encoding='utf-8' , newline='\n') as f: UpperCamelCase_ = f.readlines() # Find the start prompt. UpperCamelCase_ = 0 while not lines[start_index].startswith(__lowercase): start_index += 1 start_index += 1 UpperCamelCase_ = start_index while not lines[end_index].startswith(__lowercase): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. snake_case__ : int = direct_transformers_import(TRANSFORMERS_PATH) snake_case__ : Any = { """asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, """audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, """language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, """image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, """masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, """multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, """object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, """question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, """semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, """sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, """summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, """token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, """translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, """video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, """document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, """monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
snake_case__ : Any = { """summarization.md""": ("""nllb""",), """translation.md""": ("""nllb""",), } def _snake_case (__lowercase): UpperCamelCase_ = TASK_GUIDE_TO_MODELS[task_guide] UpperCamelCase_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__lowercase , set()) UpperCamelCase_ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()]) + "\n" def _snake_case (__lowercase , __lowercase=False): UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = _find_text_in_file( filename=os.path.join(__lowercase , __lowercase) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , ) UpperCamelCase_ = get_model_list_for_task(__lowercase) if current_list != new_list: if overwrite: with open(os.path.join(__lowercase , __lowercase) , 'w' , encoding='utf-8' , newline='\n') as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:]) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ' to fix this.') if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") snake_case__ : str = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
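The two ways to run the checker above (from the repo root, as its leading comment notes):

# python utils/check_task_guides.py                      # verify only; raises ValueError on drift
# python utils/check_task_guides.py --fix_and_overwrite  # rewrite the model lists in place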
23
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _a ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_MASKED_LM_MAPPING A_ = TF_MODEL_FOR_MASKED_LM_MAPPING def _UpperCAmelCase ( self ) -> List[str]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'}, {'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped', }, { 'sequence': 'The largest city in France is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' ) UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'}, {'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ { 'sequence': 'The largest city in France is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', }, {'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ {'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'}, {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'}, ] , ) UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=6 ) , [ [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is Maul<mask></s>', }, {'score': 
2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'}, ], [ { 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul', 'sequence': '<s>My name is<mask> Maul</s>', }, {'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'}, ], ] , ) @require_torch_gpu def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' ) # convert model to fp16 pipe.model.half() UpperCamelCase_ = pipe('Paris is the [MASK] of France.' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' ) self.run_large_test(_UpperCAmelCase ) @slow @require_tf def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' ) self.run_large_test(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = unmasker('My name is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'}, {'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'}, ] , ) UpperCamelCase_ = unmasker('The largest city in France is <mask>' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ { 'sequence': 'The largest city in France is Paris', 'score': 0.2_5_1, 'token': 2201, 'token_str': ' Paris', }, { 'sequence': 'The largest city in France is Lyon', 'score': 0.2_1_4, 'token': 12790, 'token_str': ' Lyon', }, ] , ) UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'}, {'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'}, {'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) @require_tf def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' ) UpperCamelCase_ = None UpperCamelCase_ = None self.run_pipeline_test(_UpperCAmelCase , [] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: UpperCamelCase_ = fill_masker.tokenizer UpperCamelCase_ = fill_masker.model UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token}""" , ) 
self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , ) with self.assertRaises(_UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_UpperCAmelCase ): fill_masker('This is' ) self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase ) self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase ) 
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = sorted(vocab.keys() )[:2] # Pipeline argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Call argument UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = {vocab[el] for el in targets} self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase ) UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) ) # Score equivalence UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs] UpperCamelCase_ = [top_mask['score'] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ) == set(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase ) UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] ) with self.assertRaises(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = tokenizer.get_vocab() UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # top_k=2, ntargets=3 UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ): UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = tokenizer.get_vocab() # String duplicates + id duplicates UpperCamelCase_ = sorted(vocab.keys() )[:3] UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]] UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_UpperCAmelCase ) , 3 ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) UpperCamelCase_ = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 ) self.assertEqual( _UpperCAmelCase , [ [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], [ {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, {'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )}, ], ] , )
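The same pipeline the slow tests above exercise, in plain usage form (model name taken from those tests; the output resembles the score dictionaries asserted there):

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
print(unmasker("The largest city in France is <mask>"))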
23
1
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. snake_case__ : int = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class _a ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: A_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: A_ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) UpperCamelCase_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) UpperCamelCase_ = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}] ) UpperCamelCase_ = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], ] , ) UpperCamelCase_ = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) # Legacy behavior UpperCamelCase_ = text_classifier('This is great !' , return_all_scores=_UpperCAmelCase ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) UpperCamelCase_ = text_classifier('This is great !' , return_all_scores=_UpperCAmelCase ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [[{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}]] ) UpperCamelCase_ = text_classifier(['This is great !', 'Something else'] , return_all_scores=_UpperCAmelCase ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], ] , ) UpperCamelCase_ = text_classifier(['This is great !', 'Something else'] , return_all_scores=_UpperCAmelCase ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [ {'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_0', 'score': 0.5_0_4}, ] , ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: import torch UpperCamelCase_ = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) UpperCamelCase_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) @require_tf def _UpperCAmelCase ( self ) -> Tuple: UpperCamelCase_ = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) UpperCamelCase_ = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) @slow @require_torch def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = pipeline('text-classification' ) UpperCamelCase_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 1.0}] ) UpperCamelCase_ = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) UpperCamelCase_ = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 0.9_8_8}] ) @slow @require_tf def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = pipeline('text-classification' , framework='tf' ) UpperCamelCase_ = text_classifier('This is great !' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 1.0}] ) UpperCamelCase_ = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) UpperCamelCase_ = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 0.9_8_8}] ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]: UpperCamelCase_ = TextClassificationPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) return text_classifier, ["HuggingFace is in", "This is another test"] def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: UpperCamelCase_ = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 UpperCamelCase_ = 'HuggingFace is in' UpperCamelCase_ = text_classifier(_UpperCAmelCase ) self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) UpperCamelCase_ = ['HuggingFace is in ', 'Paris is in France'] UpperCamelCase_ = text_classifier(_UpperCAmelCase ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}, {'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format UpperCamelCase_ = text_classifier(_UpperCAmelCase , top_k=_UpperCAmelCase ) UpperCamelCase_ = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [[{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] * N, [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] * N] , ) UpperCamelCase_ = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'} UpperCamelCase_ = text_classifier(_UpperCAmelCase ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , {'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
UpperCamelCase_ = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(_UpperCAmelCase ): text_classifier(_UpperCAmelCase ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility UpperCamelCase_ = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(_UpperCAmelCase ) , [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
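A short usage sketch of the calling conventions these tests pin down, reusing the tiny test checkpoint; the example strings are illustrative.

from transformers import pipeline

classifier = pipeline(
    "text-classification", model="hf-internal-testing/tiny-random-distilbert"
)

# `top_k=None` replaces the deprecated `return_all_scores=True` and yields
# one score per label instead of only the best one.
print(classifier("This is great !", top_k=None))

# Sentence pairs go in as a dict; bare nested lists are rejected, as the
# assertRaises above checks.
print(classifier({"text": "HuggingFace is in", "text_pair": "Paris is in France"}))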
23
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = StableDiffusionSAGPipeline A_ = TEXT_TO_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_BATCH_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = TEXT_TO_IMAGE_IMAGE_PARAMS A_ = False def _UpperCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) UpperCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) UpperCamelCase_ = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) torch.manual_seed(0 ) UpperCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase ) UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCamelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> List[Any]: if str(_UpperCAmelCase ).startswith('mps' ): UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase ) else: UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) UpperCamelCase_ = { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def _UpperCAmelCase ( self ) -> Tuple: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' 
UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase_ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) UpperCamelCase_ = output.images UpperCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase_ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase ) sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = '.' UpperCamelCase_ = torch.manual_seed(0 ) UpperCamelCase_ = sag_pipe( [prompt] , width=768 , height=512 , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , ) UpperCamelCase_ = output.images assert image.shape == (1, 512, 768, 3)
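A usage sketch for the pipeline under test; the checkpoint and call arguments mirror the slow test above, while the fp16/CUDA setup and the prompt are assumptions for a real GPU run.

import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

generator = torch.manual_seed(0)
# sag_scale > 0 adds Self-Attention Guidance on top of classifier-free guidance.
image = pipe(
    "a photo of an astronaut riding a horse",
    generator=generator,
    guidance_scale=7.5,
    sag_scale=0.75,
    num_inference_steps=20,
).images[0]
image.save("sag_sample.png")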
23
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class _a ( UpperCAmelCase__ ): """simple docstring""" A_ = 42 A_ = 42 class _a ( nn.Module ): """simple docstring""" A_ = 42 A_ = (16, 32, 96, 256) A_ = jnp.floataa def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) UpperCamelCase_ = [] for i in range(len(self.block_out_channels ) - 1 ): UpperCamelCase_ = self.block_out_channels[i] UpperCamelCase_ = self.block_out_channels[i + 1] UpperCamelCase_ = nn.Conv( _UpperCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_UpperCAmelCase ) UpperCamelCase_ = nn.Conv( _UpperCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_UpperCAmelCase ) UpperCamelCase_ = blocks UpperCamelCase_ = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , _UpperCAmelCase ) -> Any: UpperCamelCase_ = self.conv_in(_UpperCAmelCase ) UpperCamelCase_ = nn.silu(_UpperCAmelCase ) for block in self.blocks: UpperCamelCase_ = block(_UpperCAmelCase ) UpperCamelCase_ = nn.silu(_UpperCAmelCase ) UpperCamelCase_ = self.conv_out(_UpperCAmelCase ) return embedding @flax_register_to_config class _a ( nn.Module , UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" A_ = 32 A_ = 4 A_ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) A_ = False A_ = (320, 640, 1_280, 1_280) A_ = 2 A_ = 8 A_ = None A_ = 1_280 A_ = 0.0 A_ = False A_ = jnp.floataa A_ = True A_ = 0 A_ = "rgb" A_ = (16, 32, 96, 256) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> FrozenDict: # init input tensors UpperCamelCase_ = (1, self.in_channels, self.sample_size, self.sample_size) UpperCamelCase_ = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa ) UpperCamelCase_ = jnp.ones((1,) , dtype=jnp.intaa ) UpperCamelCase_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) UpperCamelCase_ = (1, 3, self.sample_size * 8, self.sample_size * 8) UpperCamelCase_ = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa ) UpperCamelCase_ , UpperCamelCase_ = jax.random.split(_UpperCAmelCase ) UpperCamelCase_ = {'params': params_rng, 'dropout': dropout_rng} return self.init(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )["params"] def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = self.block_out_channels UpperCamelCase_ = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. 
# The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. UpperCamelCase_ = self.num_attention_heads or self.attention_head_dim # input UpperCamelCase_ = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time UpperCamelCase_ = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) UpperCamelCase_ = FlaxTimestepEmbedding(_UpperCAmelCase , dtype=self.dtype ) UpperCamelCase_ = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) UpperCamelCase_ = self.only_cross_attention if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCamelCase_ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCamelCase_ = (num_attention_heads,) * len(self.down_block_types ) # down UpperCamelCase_ = [] UpperCamelCase_ = [] UpperCamelCase_ = block_out_channels[0] UpperCamelCase_ = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_UpperCAmelCase ) for i, down_block_type in enumerate(self.down_block_types ): UpperCamelCase_ = output_channel UpperCamelCase_ = block_out_channels[i] UpperCamelCase_ = i == len(_UpperCAmelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": UpperCamelCase_ = FlaxCrossAttnDownBlockaD( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: UpperCamelCase_ = FlaxDownBlockaD( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_UpperCAmelCase ) for _ in range(self.layers_per_block ): UpperCamelCase_ = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_UpperCAmelCase ) if not is_final_block: UpperCamelCase_ = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_UpperCAmelCase ) UpperCamelCase_ = down_blocks UpperCamelCase_ = controlnet_down_blocks # mid UpperCamelCase_ = block_out_channels[-1] UpperCamelCase_ = FlaxUNetMidBlockaDCrossAttn( in_channels=_UpperCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) UpperCamelCase_ = nn.Conv( _UpperCAmelCase , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1.0 , _UpperCAmelCase =
True , _UpperCAmelCase = False , ) -> Union[FlaxControlNetOutput, Tuple]: UpperCamelCase_ = self.controlnet_conditioning_channel_order if channel_order == "bgr": UpperCamelCase_ = jnp.flip(_UpperCAmelCase , axis=1 ) # 1. time if not isinstance(_UpperCAmelCase , jnp.ndarray ): UpperCamelCase_ = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: UpperCamelCase_ = timesteps.astype(dtype=jnp.floataa ) UpperCamelCase_ = jnp.expand_dims(_UpperCAmelCase , 0 ) UpperCamelCase_ = self.time_proj(_UpperCAmelCase ) UpperCamelCase_ = self.time_embedding(_UpperCAmelCase ) # 2. pre-process UpperCamelCase_ = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) ) UpperCamelCase_ = self.conv_in(_UpperCAmelCase ) UpperCamelCase_ = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) ) UpperCamelCase_ = self.controlnet_cond_embedding(_UpperCAmelCase ) sample += controlnet_cond # 3. down UpperCamelCase_ = (sample,) for down_block in self.down_blocks: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCamelCase_ , UpperCamelCase_ = down_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train ) else: UpperCamelCase_ , UpperCamelCase_ = down_block(_UpperCAmelCase , _UpperCAmelCase , deterministic=not train ) down_block_res_samples += res_samples # 4. mid UpperCamelCase_ = self.mid_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train ) # 5. contronet blocks UpperCamelCase_ = () for down_block_res_sample, controlnet_block in zip(_UpperCAmelCase , self.controlnet_down_blocks ): UpperCamelCase_ = controlnet_block(_UpperCAmelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) UpperCamelCase_ = controlnet_down_block_res_samples UpperCamelCase_ = self.controlnet_mid_block(_UpperCAmelCase ) # 6. scaling UpperCamelCase_ = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=_UpperCAmelCase , mid_block_res_sample=_UpperCAmelCase )
23
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar snake_case__ : List[str] = TypeVar("""T""") def _snake_case (__lowercase): return (position - 1) // 2 def _snake_case (__lowercase): return (2 * position) + 1 def _snake_case (__lowercase): return (2 * position) + 2 class _a ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: UpperCamelCase_ = [] UpperCamelCase_ = {} UpperCamelCase_ = 0 def __len__( self ) -> int: return self.elements def __repr__( self ) -> str: return str(self.heap ) def _UpperCAmelCase ( self ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) UpperCamelCase_ = self.elements self.elements += 1 self._bubble_up(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) UpperCamelCase_ , UpperCamelCase_ = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: UpperCamelCase_ , UpperCamelCase_ = self.heap[0] self._bubble_down(_UpperCAmelCase ) return elem def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> None: # Update the weight of the given key UpperCamelCase_ = self.position_map[elem] UpperCamelCase_ = (elem, weight) if position > 0: UpperCamelCase_ = get_parent_position(_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position] if parent_weight > weight: self._bubble_up(_UpperCAmelCase ) else: self._bubble_down(_UpperCAmelCase ) else: self._bubble_down(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] UpperCamelCase_ = self.position_map[elem] if curr_pos == 0: return None UpperCamelCase_ = get_parent_position(_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos] UpperCamelCase_ , UpperCamelCase_ = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_up(_UpperCAmelCase ) return None def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] UpperCamelCase_ = self.position_map[elem] UpperCamelCase_ , UpperCamelCase_ = self.heap[curr_pos] UpperCamelCase_ = get_child_left_position(_UpperCAmelCase ) UpperCamelCase_ = get_child_right_position(_UpperCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position] UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_down(_UpperCAmelCase ) if child_left_position < self.elements: UpperCamelCase_ , UpperCamelCase_ = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_down(_UpperCAmelCase ) else: return None if child_right_position < self.elements: UpperCamelCase_ , UpperCamelCase_ = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(_UpperCAmelCase , _UpperCAmelCase ) return self._bubble_down(_UpperCAmelCase ) return None def _UpperCAmelCase ( self , 
_UpperCAmelCase , _UpperCAmelCase ) -> None: # Swap the nodes at the given positions UpperCamelCase_ = self.heap[nodea_pos][0] UpperCamelCase_ = self.heap[nodea_pos][0] UpperCamelCase_ , UpperCamelCase_ = ( self.heap[nodea_pos], self.heap[nodea_pos], ) UpperCamelCase_ = nodea_pos UpperCamelCase_ = nodea_pos class _a ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: UpperCamelCase_ = {} UpperCamelCase_ = 0 def __repr__( self ) -> str: return str(self.connections ) def __len__( self ) -> int: return self.nodes def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: UpperCamelCase_ = {} self.nodes += 1 def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None: # Add an edge between 2 nodes in the graph self.add_node(_UpperCAmelCase ) self.add_node(_UpperCAmelCase ) UpperCamelCase_ = weight UpperCamelCase_ = weight def _snake_case (__lowercase , ): UpperCamelCase_ = {node: maxsize for node in graph.connections} UpperCamelCase_ = {node: None for node in graph.connections} UpperCamelCase_ = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(__lowercase , __lowercase) if priority_queue.is_empty(): return dist, parent # initialization UpperCamelCase_ = priority_queue.extract_min() UpperCamelCase_ = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: UpperCamelCase_ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowercase , dist[neighbour]) UpperCamelCase_ = node # running prim's algorithm while not priority_queue.is_empty(): UpperCamelCase_ = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: UpperCamelCase_ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowercase , dist[neighbour]) UpperCamelCase_ = node return dist, parent
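The queue-based implementation above tracks dist/parent maps; a compact standalone sketch of the same algorithm with the standard-library heap, returning the MST edges directly (function name and the toy graph are my own):

import heapq

def prim_mst(graph: dict, start) -> list:
    # Grow a spanning tree from `start`, always taking the cheapest edge
    # that leaves the tree.
    visited = {start}
    edges = [(w, start, v) for v, w in graph[start].items()]
    heapq.heapify(edges)
    mst = []
    while edges:
        weight, u, v = heapq.heappop(edges)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v, weight))
        for nxt, w in graph[v].items():
            if nxt not in visited:
                heapq.heappush(edges, (w, v, nxt))
    return mst

graph = {"a": {"b": 3, "c": 15}, "b": {"a": 3, "c": 2}, "c": {"a": 15, "b": 2}}
print(prim_mst(graph, "a"))  # [('a', 'b', 3), ('b', 'c', 2)]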
23
1
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _a : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=224 , _UpperCAmelCase=1000 , _UpperCAmelCase=[3, 3, 6, 4] , _UpperCAmelCase=[48, 56, 112, 220] , ) -> Optional[int]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = num_channels UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = num_labels UpperCamelCase_ = image_size UpperCamelCase_ = layer_depths UpperCamelCase_ = embed_dims def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase_ = None if self.use_labels: UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase_ = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> str: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_UpperCAmelCase , layer_scale_init_value=1e-5 , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: UpperCamelCase_ = SwiftFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: UpperCamelCase_ = self.num_labels UpperCamelCase_ = SwiftFormerForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) UpperCamelCase_ = SwiftFormerForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase_ = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: ((UpperCamelCase_) , (UpperCamelCase_) , (UpperCamelCase_)) = self.prepare_config_and_inputs() UpperCamelCase_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _a ( UpperCAmelCase__ , UpperCAmelCase__ , 
unittest.TestCase ): """simple docstring""" A_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () A_ = ( {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification} if is_torch_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = SwiftFormerModelTester(self ) UpperCamelCase_ = ConfigTester( self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='SwiftFormer does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> List[str]: pass def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(_UpperCAmelCase ) UpperCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_ = SwiftFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip(reason='SwiftFormer does not output attentions' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: pass def _UpperCAmelCase ( self ) -> int: def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): UpperCamelCase_ = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): UpperCamelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) UpperCamelCase_ = outputs.hidden_states UpperCamelCase_ = 8 self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(_UpperCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase_ 
= True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Tuple: def _config_zero_init(_UpperCAmelCase ): UpperCamelCase_ = copy.deepcopy(_UpperCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_UpperCAmelCase , _UpperCAmelCase , 1e-10 ) if isinstance(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ): UpperCamelCase_ = _config_zero_init(getattr(_UpperCAmelCase , _UpperCAmelCase ) ) setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return configs_no_init UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase_ = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: UpperCamelCase_ = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: pass def _snake_case (): UpperCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_torch @require_vision class _a ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCAmelCase ( self ) -> List[str]: return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(_UpperCAmelCase ) UpperCamelCase_ = self.default_image_processor UpperCamelCase_ = prepare_img() UpperCamelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): UpperCamelCase_ = model(**_UpperCAmelCase ) # verify the logits UpperCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) UpperCamelCase_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
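An inference sketch built from the same checkpoint and fixture the integration test above uses; any RGB image works in place of the fixture path.

import torch
from PIL import Image
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# Map the top logit back to a human-readable class name.
print(model.config.id2label[logits.argmax(-1).item()])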
23
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar snake_case__ : Dict = TypeVar("""T""") class _a ( Generic[T] ): """simple docstring""" A_ = 42 # Cache store of keys A_ = 42 # References of the keys in cache A_ = 10 # Maximum capacity of cache def __init__( self , _UpperCAmelCase ) -> None: UpperCamelCase_ = deque() UpperCamelCase_ = set() if not n: UpperCamelCase_ = sys.maxsize elif n < 0: raise ValueError('n should be an integer greater than 0.' ) else: UpperCamelCase_ = n def _UpperCAmelCase ( self , _UpperCAmelCase ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: UpperCamelCase_ = self.dq_store.pop() self.key_reference.remove(_UpperCAmelCase ) else: self.dq_store.remove(_UpperCAmelCase ) self.dq_store.appendleft(_UpperCAmelCase ) self.key_reference.add(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> None: for k in self.dq_store: print(_UpperCAmelCase ) def __repr__( self ) -> str: return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() snake_case__ : LRUCache[str | int] = LRUCache(4) lru_cache.refer("""A""") lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer("""A""") lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
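For comparison, the same eviction policy is often written with an OrderedDict instead of the deque + set pair above; a minimal sketch (class and variable names are my own):

from collections import OrderedDict

class LRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # most recently used goes to the end
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)  # evict the least recently used
        self.store[key] = True

cache = LRU(2)
for key in ("A", "B", "A", "C"):
    cache.refer(key)
print(list(cache.store))  # ['A', 'C'] -> 'B' was evicted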
23
1
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _snake_case (__lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ): set_seed(3) # generate train_data and objective_set UpperCamelCase_ , UpperCamelCase_ = generate_datasets( __lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase) # keeps model same across runs set_seed(4) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # load pretrained model UpperCamelCase_ = load_gpta('gpt2').to(__lowercase) print('computing perplexity on objective set') UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item() print('perplexity on objective set:' , __lowercase) # collect igf pairs and save to file demo.jbl collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _snake_case (__lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ): set_seed(42) # Load pre-trained model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') # Initialize secondary learner to use embedding weights of model UpperCamelCase_ = SecondaryLearner(__lowercase) # Train secondary learner UpperCamelCase_ = train_secondary_learner( __lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ): UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') UpperCamelCase_ = RandomSampler(__lowercase) UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase) UpperCamelCase_ = max_steps // (len(__lowercase)) + 1 UpperCamelCase_ = 0 UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase) model.train() if secondary_learner is not None: secondary_learner.to(__lowercase) secondary_learner.eval() UpperCamelCase_ = [] UpperCamelCase_ = 0 UpperCamelCase_ = [] UpperCamelCase_ = [] # Compute the performance of the transformer model at the beginning UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) for epoch in range(int(__lowercase)): for step, example in enumerate(__lowercase): torch.cuda.empty_cache() UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1) UpperCamelCase_ = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
UpperCamelCase_ = model(__lowercase , labels=__lowercase) UpperCamelCase_ = True if secondary_learner is not None: UpperCamelCase_ = secondary_learner.forward( torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item() observed_qs.append(float(__lowercase)) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase_ = -1 if predicted_q < threshold: UpperCamelCase_ = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu())) UpperCamelCase_ = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase_ = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __lowercase) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _snake_case (): UpperCamelCase_ = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task') # Required parameters parser.add_argument( '--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , ) parser.add_argument( '--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--data_file' , type=__lowercase , default=__lowercase , help=( 'A jbl file containing tokenized data which can be split as objective dataset, ' 'train_dataset and test_dataset.' ) , ) parser.add_argument( '--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , ) parser.add_argument( '--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , ) parser.add_argument( '--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.') parser.add_argument( '--context_len' , default=32 , type=__lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) , ) parser.add_argument( '--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , ) parser.add_argument( '--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq') parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs') parser.add_argument( '--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , ) parser.add_argument( '--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ') parser.add_argument( '--eval_interval' , default=10 , type=__lowercase , help=( 'decay the selectivity of our secondary learner filter from' '1 standard deviation above average to 1 below average after 10 batches' ) , ) parser.add_argument( '--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data') parser.add_argument( '--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set') parser.add_argument( '--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner') parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length') parser.add_argument( '--threshold' , default=1.0 , type=__lowercase , help=( 'The threshold value used by secondary learner to filter the train_data and allow only' ' informative data as input to the model' ) , ) parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name') parser.add_argument( '--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , ) # Load train data for secondary learner UpperCamelCase_ = joblib.load('data/IGF_values.jbl') # Train secondary learner UpperCamelCase_ = training_secondary_learner( __lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , ) # load pretrained gpt2 model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') set_seed(42) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase_ , UpperCamelCase_ = generate_datasets( context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , ) if __name__ == "__main__": main()
23
import numpy as np def _snake_case (__lowercase): return 1 / (1 + np.exp(-vector)) def _snake_case (__lowercase): return vector * sigmoid(__lowercase) if __name__ == "__main__": import doctest doctest.testmod()
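A quick numeric check of the two functions above, silu(x) = x * sigmoid(x); the sample values are hand-computed to four decimals.

import numpy as np

x = np.array([-2.0, 0.0, 2.0])
s = 1 / (1 + np.exp(-x))
print(s)      # [0.1192 0.5    0.8808] (approximately)
print(x * s)  # [-0.2384  0.      1.7616] (approximately)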
23
1
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = StableUnCLIPImgaImgPipeline A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A_ = frozenset([] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = 32 UpperCamelCase_ = embedder_hidden_size # image encoding components UpperCamelCase_ = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) UpperCamelCase_ = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=_UpperCAmelCase , projection_dim=_UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) UpperCamelCase_ = StableUnCLIPImageNormalizer(embedding_dim=_UpperCAmelCase ) UpperCamelCase_ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' ) torch.manual_seed(0 ) UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) torch.manual_seed(0 ) UpperCamelCase_ = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) UpperCamelCase_ = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_UpperCAmelCase , layers_per_block=1 , upcast_attention=_UpperCAmelCase , use_linear_projection=_UpperCAmelCase , ) torch.manual_seed(0 ) UpperCamelCase_ = DDIMScheduler( beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , ) torch.manual_seed(0 ) UpperCamelCase_ = AutoencoderKL() UpperCamelCase_ = { # image encoding components 'feature_extractor': feature_extractor, 'image_encoder': image_encoder.eval(), # image noising components 'image_normalizer': image_normalizer.eval(), 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': 
text_encoder.eval(), 'unet': unet.eval(), 'scheduler': scheduler, 'vae': vae.eval(), } return components def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 , _UpperCAmelCase=True ) -> Optional[int]: if str(_UpperCAmelCase ).startswith('mps' ): UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase ) else: UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) if pil_image: UpperCamelCase_ = input_image * 0.5 + 0.5 UpperCamelCase_ = input_image.clamp(0 , 1 ) UpperCamelCase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() UpperCamelCase_ = DiffusionPipeline.numpy_to_pil(_UpperCAmelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase_ = self.get_dummy_components() UpperCamelCase_ = StableUnCLIPImgaImgPipeline(**_UpperCAmelCase ) UpperCamelCase_ = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) UpperCamelCase_ = self.get_dummy_inputs(_UpperCAmelCase ) inputs.update({'image_embeds': None} ) UpperCamelCase_ = sd_pipe(**_UpperCAmelCase ).images UpperCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCamelCase_ = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _UpperCAmelCase ( self ) -> Any: UpperCamelCase_ = torch_device in ['cpu', 'mps'] self._test_attention_slicing_forward_pass(test_max_difference=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=_UpperCAmelCase ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _UpperCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_UpperCAmelCase ) @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def _UpperCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) UpperCamelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' ) UpperCamelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase_ = pipe(_UpperCAmelCase , 'anime turle' , generator=_UpperCAmelCase , output_type='np' ) UpperCamelCase_ = output.images[0] assert image.shape == (768, 768, 3) 
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> int: UpperCamelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) UpperCamelCase_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' ) UpperCamelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase_ = pipe(_UpperCAmelCase , 'anime turle' , generator=_UpperCAmelCase , output_type='np' ) UpperCamelCase_ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCamelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained( 'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa ) UpperCamelCase_ = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCamelCase_ = pipe( _UpperCAmelCase , 'anime turtle' , num_inference_steps=2 , output_type='np' , ) UpperCamelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
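A usage sketch mirroring the first slow test above; the checkpoint, fixture URL, offloading call, and (image, prompt) call pattern come from the test, assuming the diffusers export `StableUnCLIPImg2ImgPipeline`.

import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe.enable_sequential_cpu_offload()  # keeps peak VRAM low, as the test does

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe(init_image, "anime turtle", generator=generator).images[0]
image.save("unclip_img2img.png")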
23
import math from datetime import datetime, timedelta def _snake_case (__lowercase): UpperCamelCase_ = year % 19 UpperCamelCase_ = year % 4 UpperCamelCase_ = year % 7 UpperCamelCase_ = math.floor(year / 100) UpperCamelCase_ = math.floor((13 + 8 * leap_day_inhibits) / 25) UpperCamelCase_ = leap_day_inhibits / 4 UpperCamelCase_ = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 UpperCamelCase_ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 UpperCamelCase_ = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon UpperCamelCase_ = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(__lowercase , 4 , 19) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(__lowercase , 4 , 18) else: return datetime(__lowercase , 3 , 22) + timedelta( days=int(days_to_add + days_from_phm_to_sunday)) if __name__ == "__main__": for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3): snake_case__ : Dict = """will be""" if year > datetime.now().year else """was""" print(f'Easter in {year} {tense} {gauss_easter(year)}')
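As a worked check of the arithmetic above (my own hand computation, matching the formula step by step):

# Worked example for year = 2023:
#   metonic_cycle             = 2023 % 19   = 9
#   julian_leap_year          = 2023 % 4    = 3
#   non_leap_year             = 2023 % 7    = 0
#   leap_day_inhibits         = 2023 // 100 = 20
#   lunar_orbit_correction    = (13 + 8 * 20) // 25 = 6
#   leap_day_reinstall_number = 20 / 4 = 5.0
#   secular_moon_shift        = (15 - 6 + 20 - 5) % 30 = 24
#   century_starting_point    = (4 + 20 - 5) % 7 = 5
#   days_to_add               = (19 * 9 + 24) % 30 = 15
#   days_from_phm_to_sunday   = (2 * 3 + 4 * 0 + 6 * 15 + 5) % 7 = 3
# Neither special case (29/6 or 28/6) applies, so the result is
# March 22 + 18 days = April 9, 2023 -- the correct Easter date.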
23
1
def _snake_case (__lowercase , __lowercase): if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive') UpperCamelCase_ = str(bin(__lowercase))[2:] # remove the leading "0b" UpperCamelCase_ = str(bin(__lowercase))[2:] UpperCamelCase_ = max(len(__lowercase) , len(__lowercase)) return "0b" + "".join( str(int('1' in (char_a, char_b))) for char_a, char_b in zip(a_binary.zfill(__lowercase) , b_binary.zfill(__lowercase))) if __name__ == "__main__": import doctest doctest.testmod()
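The zfill-and-join construction above is equivalent to Python's built-in bitwise OR; a quick cross-check with made-up values:

a, b = 25, 32          # 0b11001 and 0b100000
print(bin(a | b))      # 0b111001, the same string the function builds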
23
import requests def _snake_case (__lowercase , __lowercase): UpperCamelCase_ = {'Content-Type': 'application/json'} UpperCamelCase_ = requests.post(__lowercase , json={'text': message_body} , headers=__lowercase) if response.status_code != 200: UpperCamelCase_ = ( 'Request to slack returned an error ' f"""{response.status_code}, the response is:\n{response.text}""" ) raise ValueError(__lowercase) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
23
1
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Binary min-heap keyed by weight, with a position map for O(1) lookups."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
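# A minimal usage sketch for the structures above; the graph is a made-up
# example. With the dist[node] + edge-weight update rule used here, the
# parent map traces a spanning tree rooted at the first extracted node.
example_graph = GraphUndirectedWeighted()
example_graph.add_edge("a", "b", 3)
example_graph.add_edge("b", "c", 10)
example_graph.add_edge("c", "d", 5)
example_graph.add_edge("a", "c", 15)
example_graph.add_edge("b", "d", 100)

example_dist, example_parent = prims_algo(example_graph)

# Every (parent, node) pair with a non-None parent is a tree edge.
tree_edges = sorted(
    (example_parent[node], node)
    for node in example_graph.connections
    if example_parent[node] is not None
)
print(tree_edges)  # [('a', 'b'), ('b', 'c'), ('c', 'd')]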
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # Flag open(...) calls whose argument list mentions neither an
        # encoding nor a binary/append/write mode keyword.
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        # Flag print(...) calls that are not inside comments, strings or docstrings.
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
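# A standalone sketch of what the first pattern flags; the sample lines
# below are invented for illustration.
import re

pattern = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
samples = [
    "with open('data.txt') as f:",                    # flagged: no encoding given
    "with open('data.txt', encoding='utf-8') as f:",  # ok: explicit encoding
    "with open('data.bin', 'rb') as f:",              # ok: binary mode
]
for line in samples:
    print(bool(pattern.search(line)), line)
# expected: True, then False, then False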
import unittest

from transformers import DonutProcessor

DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }

        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
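# Usage sketch for token2json outside the test (downloads the processor files
# on first run). The menu sequence is invented, and the expected output is
# inferred from the tag-to-JSON convention exercised above.
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained('naver-clova-ix/donut-base')
sequence = '<s_menu><s_nm>Latte</s_nm><s_price>4.50</s_price></s_menu>'
print(processor.token2json(sequence))
# expected: {'menu': {'nm': 'Latte', 'price': '4.50'}}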
import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _snake_case (__lowercase=32 , __lowercase=10 , __lowercase=100 , __lowercase=1026 , __lowercase=True , __lowercase="data/tokenized_stories_train_wikitext103.jbl" , __lowercase="igf_context_pairs.jbl" , ): set_seed(3) # generate train_data and objective_set UpperCamelCase_ , UpperCamelCase_ = generate_datasets( __lowercase , __lowercase , number=__lowercase , min_len=1026 , trim=__lowercase) # keeps model same across runs set_seed(4) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # load pretrained model UpperCamelCase_ = load_gpta('gpt2').to(__lowercase) print('computing perplexity on objective set') UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase).item() print('perplexity on objective set:' , __lowercase) # collect igf pairs and save to file demo.jbl collect_objective_set(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _snake_case (__lowercase , __lowercase=15 , __lowercase=128 , __lowercase=100 , __lowercase="igf_model.pt" , ): set_seed(42) # Load pre-trained model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') # Initialize secondary learner to use embedding weights of model UpperCamelCase_ = SecondaryLearner(__lowercase) # Train secondary learner UpperCamelCase_ = train_secondary_learner( __lowercase , __lowercase , max_epochs=__lowercase , batch_size=__lowercase , eval_freq=100 , igf_model_path=__lowercase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=32 , __lowercase=1000 , __lowercase=16 , __lowercase=1.0 , __lowercase=recopy_gpta , __lowercase=None , __lowercase=10 , __lowercase="gpt2_finetuned.pt" , ): UpperCamelCase_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') UpperCamelCase_ = RandomSampler(__lowercase) UpperCamelCase_ = DataLoader(__lowercase , sampler=__lowercase) UpperCamelCase_ = max_steps // (len(__lowercase)) + 1 UpperCamelCase_ = 0 UpperCamelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowercase) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = recopy_model(__lowercase , __lowercase , __lowercase) model.train() if secondary_learner is not None: secondary_learner.to(__lowercase) secondary_learner.eval() UpperCamelCase_ = [] UpperCamelCase_ = 0 UpperCamelCase_ = [] UpperCamelCase_ = [] # Compute the performance of the transformer model at the beginning UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) for epoch in range(int(__lowercase)): for step, example in enumerate(__lowercase): torch.cuda.empty_cache() UpperCamelCase_ = random.randint(0 , example.size(2) - context_len - 1) UpperCamelCase_ = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() 
UpperCamelCase_ = model(__lowercase , labels=__lowercase) UpperCamelCase_ = True if secondary_learner is not None: UpperCamelCase_ = secondary_learner.forward( torch.tensor(__lowercase , dtype=torch.long , device=__lowercase).unsqueeze(0))[0].item() observed_qs.append(float(__lowercase)) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase_ = -1 if predicted_q < threshold: UpperCamelCase_ = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu())) UpperCamelCase_ = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase_ = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase_ = compute_perplexity(__lowercase , __lowercase , __lowercase) test_perps.append(__lowercase) print('Test perplexity, step' , __lowercase , ':' , __lowercase) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __lowercase) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _snake_case (): UpperCamelCase_ = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task') # Required parameters parser.add_argument( '--data_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The input data dir. Should contain data files for WikiText.' , ) parser.add_argument( '--model_name_or_path' , default=__lowercase , type=__lowercase , required=__lowercase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--data_file' , type=__lowercase , default=__lowercase , help=( 'A jbl file containing tokenized data which can be split as objective dataset, ' 'train_dataset and test_dataset.' ) , ) parser.add_argument( '--igf_data_file' , type=__lowercase , default=__lowercase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , ) parser.add_argument( '--output_dir' , default=__lowercase , type=__lowercase , required=__lowercase , help='The output directory where the final fine-tuned model is stored.' , ) parser.add_argument( '--tokenizer_name' , default=__lowercase , type=__lowercase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument('--seed' , type=__lowercase , default=__lowercase , help='A seed for reproducible training.') parser.add_argument( '--context_len' , default=32 , type=__lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' 
) , ) parser.add_argument( '--size_objective_set' , default=100 , type=__lowercase , help='number of articles that are long enough to be used as our objective set' , ) parser.add_argument( '--eval_freq' , default=100 , type=__lowercase , help='secondary model evaluation is triggered at eval_freq') parser.add_argument('--max_steps' , default=1000 , type=__lowercase , help='To calculate training epochs') parser.add_argument( '--secondary_learner_batch_size' , default=128 , type=__lowercase , help='batch size of training data for secondary learner' , ) parser.add_argument( '--batch_size' , default=16 , type=__lowercase , help='batch size of training data of language model(gpt2) ') parser.add_argument( '--eval_interval' , default=10 , type=__lowercase , help=( 'decay the selectivity of our secondary learner filter from' '1 standard deviation above average to 1 below average after 10 batches' ) , ) parser.add_argument( '--number' , default=100 , type=__lowercase , help='The number of examples split to be used as objective_set/test_data') parser.add_argument( '--min_len' , default=1026 , type=__lowercase , help='The minimum length of the article to be used as objective set') parser.add_argument( '--secondary_learner_max_epochs' , default=15 , type=__lowercase , help='number of epochs to train secondary learner') parser.add_argument('--trim' , default=__lowercase , type=__lowercase , help='truncate the example if it exceeds context length') parser.add_argument( '--threshold' , default=1.0 , type=__lowercase , help=( 'The threshold value used by secondary learner to filter the train_data and allow only' ' informative data as input to the model' ) , ) parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=__lowercase , help='finetuned_model_name') parser.add_argument( '--recopy_model' , default=__lowercase , type=__lowercase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowercase , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , ) # Load train data for secondary learner UpperCamelCase_ = joblib.load('data/IGF_values.jbl') # Train secondary learner UpperCamelCase_ = training_secondary_learner( __lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , ) # load pretrained gpt2 model UpperCamelCase_ = GPTaLMHeadModel.from_pretrained('gpt2') set_seed(42) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase_ , UpperCamelCase_ = generate_datasets( context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=__lowercase) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( __lowercase , __lowercase , __lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowercase , secondary_learner=__lowercase , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , ) if __name__ == "__main__": main()
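# A self-contained sketch of the filtering rule the training loop above
# applies: keep a context for backprop only when the secondary learner's
# predicted information gain clears a threshold, and drop the threshold to
# -1 after the first 10 global steps. All numbers below are invented.
def select_contexts(predicted_igs, global_step, initial_threshold=1.0):
    threshold = initial_threshold if global_step < 10 else -1.0
    return [i for i, q in enumerate(predicted_igs) if q >= threshold]

print(select_contexts([1.2, 0.3, -0.5, 2.0], global_step=5))   # [0, 3]
print(select_contexts([1.2, 0.3, -0.5, 2.0], global_step=20))  # [0, 1, 2, 3]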
def remove_digit(num: int) -> int:
    """Return the largest number that can be formed by removing exactly one
    digit from the absolute value of the given integer.

    >>> remove_digit(152)
    52
    >>> remove_digit(-290)
    90
    """
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(num))
    # one copy of the digit list per digit position
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        # remove a different digit from each copy
        num_transpositions[index].pop(index)
    return max(int(''.join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
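# Quick hand-verified checks for remove_digit:
assert remove_digit(152) == 52   # candidates after one removal: 52, 12, 15
assert remove_digit(-290) == 90  # sign is dropped via abs(): 90, 20, 29
print("remove_digit checks passed")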
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _a : """simple docstring""" A_ = MBartConfig A_ = {} A_ = """gelu""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Union[str, Any]: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = eos_token_id UpperCamelCase_ = pad_token_id UpperCamelCase_ = bos_token_id def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCamelCase_ = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = TFMBartModel(config=_UpperCAmelCase ).get_decoder() UpperCamelCase_ = inputs_dict['input_ids'] UpperCamelCase_ = input_ids[:1, :] UpperCamelCase_ = inputs_dict['attention_mask'][:1, :] UpperCamelCase_ = inputs_dict['head_mask'] UpperCamelCase_ = 1 # first forward pass UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple() UpperCamelCase_ = past_key_values[1] def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ): if attention_mask is None: UpperCamelCase_ = 
tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id) , tf.inta) if decoder_attention_mask is None: UpperCamelCase_ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta), ] , axis=-1 , ) if head_mask is None: UpperCamelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: UpperCamelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () A_ = (TFMBartForConditionalGeneration,) if is_tf_available() else () A_ = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) A_ = True A_ = False A_ = False def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = TFMBartModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _a ( unittest.TestCase ): """simple docstring""" A_ = [ """ UN Chief Says There Is No Military Solution in Syria""", ] A_ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] A_ = """facebook/mbart-large-en-ro""" @cached_property def _UpperCAmelCase ( self ) -> Any: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> int: UpperCamelCase_ = self.translate_src_text(**_UpperCAmelCase ) self.assertListEqual(self.expected_text , _UpperCAmelCase ) def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> List[str]: UpperCamelCase_ = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' ) UpperCamelCase_ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) UpperCamelCase_ = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def _UpperCAmelCase ( self ) -> List[Any]: self._assert_generated_batch_equal_expected()
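# Usage sketch mirroring the integration test above; downloads the full
# facebook/mbart-large-en-ro checkpoint on first run.
from transformers import AutoTokenizer, TFMBartForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-en-ro')
model = TFMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-en-ro')

inputs = tokenizer([' UN Chief Says There Is No Military Solution in Syria'], return_tensors='tf')
generated = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# expected: ['Şeful ONU declară că nu există o soluţie militară în Siria']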
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class _a : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=12 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0 , _UpperCAmelCase=None , ) -> Dict: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_input_mask UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = projection_dim UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = dropout UpperCamelCase_ = attention_dropout UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = initializer_range UpperCamelCase_ = scope UpperCamelCase_ = bos_token_id def _UpperCAmelCase ( self ) -> Dict: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = None if self.use_input_mask: UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCamelCase_ = input_mask.numpy() UpperCamelCase_ , UpperCamelCase_ = input_mask.shape UpperCamelCase_ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCAmelCase ): UpperCamelCase_ = 1 UpperCamelCase_ = 0 UpperCamelCase_ = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]: UpperCamelCase_ = TFBlipTextModel(config=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , training=_UpperCAmelCase ) UpperCamelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs UpperCamelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class _a ( UpperCAmelCase__ , 
unittest.TestCase ): """simple docstring""" A_ = (TFBlipTextModel,) if is_tf_available() else () A_ = False A_ = False A_ = False def _UpperCAmelCase ( self ) -> List[str]: UpperCamelCase_ = BlipTextModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def _UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass def _UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason='Blip does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> List[str]: pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass @unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' ) def _UpperCAmelCase ( self ) -> Dict: pass @slow def _UpperCAmelCase ( self ) -> int: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_ = TFBlipTextModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _UpperCAmelCase ( self , _UpperCAmelCase=True ) -> List[Any]: super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCAmelCase )
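# Usage sketch with a tiny, randomly initialized config (no download); the
# dimensions below are invented for illustration.
import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

config = BlipTextConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
model = TFBlipTextModel(config=config)

input_ids = tf.constant([[5, 6, 7, 8]])
outputs = model(input_ids, training=False)
print(outputs.last_hidden_state.shape)  # (1, 4, 32): batch, sequence, hidden size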
def factorial(num: int) -> int:
    """Return num! (num factorial)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and return their sum."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
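# Spot check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert factorial(10) == 3628800
assert split_and_add(3628800) == 27
assert solution(10) == 27
print("digit-sum checks passed")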
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]', str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return ''.join(
        [''.join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return 'not valid string'


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return 'not valid string'


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, '_')


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, '-')


if __name__ == "__main__":
    __import__("doctest").testmod()
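# Quick demo of the conversion helpers above (outputs verified by tracing
# split_input on a punctuation-free phrase):
print(to_pascal_case('hello world'))        # HelloWorld
print(to_camel_case('hello world'))         # helloWorld
print(to_snake_case('hello world', False))  # hello_world
print(to_kebab_case('hello world', True))   # HELLO-WORLD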
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    # a batch of videos (each a list of frames)
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    # a single video given as a list of frames
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    # a single frame
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
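# Usage sketch, assuming this class is exposed as transformers'
# VivitImageProcessor; the 8-frame dummy video below is random data.
import numpy as np
from transformers import VivitImageProcessor

video = [np.random.randint(0, 256, size=(256, 320, 3), dtype=np.uint8) for _ in range(8)]
processor = VivitImageProcessor()
batch = processor(video, return_tensors='np')
print(batch['pixel_values'].shape)  # (1, 8, 3, 224, 224): batch, frames, channels, height, width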
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from all printable
    character classes."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


# ALTERNATIVE METHODS
# chars_incl = characters that must be in the password
# i = total length the password will have
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
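# Non-interactive usage sketch for the generators above:
pw = password_generator(12)
print(pw, is_strong_password(pw))  # the strength check may be True or False for a random draw

# Force specific characters into a 16-character password.
pw2 = alternative_password_generator('Xy1!', 16)
print(pw2, len(pw2))  # always 16 characters, containing X, y, 1 and !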
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step_pred function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
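# Minimal sanity-check loop with a dummy score model; a real pipeline would
# plug in a trained score network here. Shapes and step counts are invented.
import torch

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = -sample  # stand-in for a score-model prediction
    sample = scheduler.step_correct(model_output, sample).prev_sample
    sample = scheduler.step_pred(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])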