Dataset columns:
  code                     string  (length 87 to 55.2k)
  code_codestyle           int64   (0 to 349)
  style_context            string  (length 135 to 49.1k)
  style_context_codestyle  int64   (0 to 349)
  label                    int64   (0 to 1)
code:
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)


def lowerCAmelCase__( lowercase : Dict , lowercase : Optional[Any]=False ) -> Tuple:
    __snake_case : Any = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )

    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )

    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        __snake_case : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on

    return rename_keys


def lowerCAmelCase__( lowercase : Any , lowercase : Optional[int] , lowercase : Optional[int]=False ) -> Optional[Any]:
    for i in range(config.num_hidden_layers ):
        if base_model:
            __snake_case : Dict = ""
        else:
            __snake_case : Any = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __snake_case : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        __snake_case : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        __snake_case : Any = in_proj_weight[
            : config.hidden_size, :
        ]
        __snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
        __snake_case : Dict = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __snake_case : List[Any] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __snake_case : Optional[int] = in_proj_weight[
            -config.hidden_size :, :
        ]
        __snake_case : Union[str, Any] = in_proj_bias[-config.hidden_size :]


def lowerCAmelCase__( lowercase : Any ) -> Tuple:
    __snake_case : Any = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(lowercase , lowercase )


def lowerCAmelCase__( lowercase : Any , lowercase : Union[str, Any] , lowercase : List[Any] ) -> Union[str, Any]:
    __snake_case : Optional[int] = dct.pop(lowercase )
    __snake_case : List[Any] = val


def lowerCAmelCase__( ) -> Tuple:
    __snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    __snake_case : List[str] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
    return im


@torch.no_grad()
def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Optional[Any]=False ) -> Union[str, Any]:
    __snake_case : List[Any] = BitConfig(
        global_padding="same" ,
        layer_type="bottleneck" ,
        depths=(3, 4, 9) ,
        out_features=["stage3"] ,
        embedding_dynamic_padding=lowercase ,
    )
    __snake_case : Dict = ViTHybridConfig(backbone_config=lowercase , image_size=384 , num_labels=1000 )
    __snake_case : List[Any] = False

    # load original model from timm
    __snake_case : List[Any] = timm.create_model(lowercase , pretrained=lowercase )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    __snake_case : List[Any] = timm_model.state_dict()
    if base_model:
        remove_classification_head_(lowercase )
    __snake_case : Tuple = create_rename_keys(lowercase , lowercase )
    for src, dest in rename_keys:
        rename_key(lowercase , lowercase , lowercase )
    read_in_q_k_v(lowercase , lowercase , lowercase )

    __snake_case : Optional[int] = "huggingface/label-files"
    __snake_case : int = "imagenet-1k-id2label.json"
    __snake_case : Union[str, Any] = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
    __snake_case : int = {int(lowercase ): v for k, v in idalabel.items()}
    __snake_case : int = idalabel
    __snake_case : Dict = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        __snake_case : str = ViTHybridModel(lowercase ).eval()
    else:
        __snake_case : Optional[Any] = ViTHybridForImageClassification(lowercase ).eval()
    model.load_state_dict(lowercase )

    # create image processor
    __snake_case : Dict = create_transform(**resolve_data_config({} , model=lowercase ) )
    __snake_case : List[Any] = transform.transforms

    __snake_case : Optional[int] = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    __snake_case : int = ViTHybridImageProcessor(
        do_resize=lowercase ,
        size={"shortest_edge": timm_transforms[0].size} ,
        resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
        do_center_crop=lowercase ,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,
        do_normalize=lowercase ,
        image_mean=timm_transforms[-1].mean.tolist() ,
        image_std=timm_transforms[-1].std.tolist() ,
    )

    __snake_case : int = prepare_img()
    __snake_case : Tuple = transform(lowercase ).unsqueeze(0 )
    __snake_case : Optional[int] = processor(lowercase , return_tensors="pt" ).pixel_values

    # verify pixel values
    assert torch.allclose(lowercase , lowercase )

    # verify logits
    with torch.no_grad():
        __snake_case : Optional[int] = model(lowercase )
        __snake_case : Optional[int] = outputs.logits

    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        __snake_case : Tuple = timm_model.forward_features(lowercase )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(lowercase , outputs.pooler_output , atol=1E-3 )
    else:
        __snake_case : int = timm_model(lowercase )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(lowercase , outputs.logits , atol=1E-3 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        Path(lowercase ).mkdir(exist_ok=lowercase )
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowercase )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(lowercase )

    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""" )
        model.push_to_hub(f"""ybelkada/{vit_name}""" )
        processor.push_to_hub(f"""ybelkada/{vit_name}""" )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--vit_name''',
        default='''vit_base_r50_s16_384''',
        type=str,
        help='''Name of the hybrid ViT timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
    )

    _UpperCamelCase = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 326

style_context:
import argparse
import datetime


def lowerCAmelCase__( lowercase : str ) -> str:
    __snake_case : int = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    __snake_case : int = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(lowercase ) < 11:
        raise ValueError("Must be 10 characters long" )

    # Get month
    __snake_case : int = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12" )

    __snake_case : str = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )

    # Get day
    __snake_case : int = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31" )

    # Get second separator
    __snake_case : str = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )

    # Get year
    __snake_case : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?" )

    # Get datetime obj for validation
    __snake_case : str = datetime.date(int(lowercase ) , int(lowercase ) , int(lowercase ) )

    # Start math
    if m <= 2:
        __snake_case : Optional[Any] = y - 1
        __snake_case : Tuple = m + 12
    # maths var
    __snake_case : int = int(str(lowercase )[:2] )
    __snake_case : int = int(str(lowercase )[2:] )
    __snake_case : int = int(2.6 * m - 5.3_9 )
    __snake_case : int = int(c / 4 )
    __snake_case : int = int(k / 4 )
    __snake_case : int = int(d + k )
    __snake_case : int = int(t + u + v + x )
    __snake_case : int = int(z - (2 * c) )
    __snake_case : int = round(w % 7 )
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer." )

    # Response
    __snake_case : str = f"""Your date {date_input}, is a {days[str(lowercase )]}!"""
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _UpperCamelCase = argparse.ArgumentParser(
        description=(
            '''Find out what day of the week nearly any date is or was. Enter '''
            '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
        )
    )
    parser.add_argument(
        '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
    )
    _UpperCamelCase = parser.parse_args()
    zeller(args.date_input)

style_context_codestyle: 326

label: 1
code:
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {
    '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class _lowerCamelCase ( a ):
    """simple docstring"""

    UpperCAmelCase_ : str ="data2vec-audio"

    def __init__( self , UpperCAmelCase=32 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-5 , UpperCAmelCase="gelu" , UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=16 , UpperCAmelCase=19 , UpperCAmelCase=5 , UpperCAmelCase=0.05 , UpperCAmelCase=10 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=10 , UpperCAmelCase=0 , UpperCAmelCase="sum" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=256 , UpperCAmelCase=(512, 512, 512, 512, 1500) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=512 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> str:
        '''simple docstring'''
        super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
        __snake_case : Optional[Any] = hidden_size
        __snake_case : Union[str, Any] = feat_extract_activation
        __snake_case : Dict = list(UpperCAmelCase )
        __snake_case : List[Any] = list(UpperCAmelCase )
        __snake_case : Union[str, Any] = list(UpperCAmelCase )
        __snake_case : List[Any] = conv_bias
        __snake_case : Optional[Any] = num_conv_pos_embeddings
        __snake_case : Tuple = num_conv_pos_embedding_groups
        __snake_case : Optional[Any] = conv_pos_kernel_size
        __snake_case : Tuple = len(self.conv_dim )
        __snake_case : str = num_hidden_layers
        __snake_case : str = intermediate_size
        __snake_case : Optional[Any] = hidden_act
        __snake_case : Union[str, Any] = num_attention_heads
        __snake_case : Any = hidden_dropout
        __snake_case : int = attention_dropout
        __snake_case : List[str] = activation_dropout
        __snake_case : Optional[Any] = feat_proj_dropout
        __snake_case : int = final_dropout
        __snake_case : Any = layerdrop
        __snake_case : int = layer_norm_eps
        __snake_case : List[str] = initializer_range
        __snake_case : Any = vocab_size
        __snake_case : List[str] = use_weighted_layer_sum

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __snake_case : Dict = mask_time_prob
        __snake_case : Tuple = mask_time_length
        __snake_case : Optional[Any] = mask_time_min_masks
        __snake_case : Optional[int] = mask_feature_prob
        __snake_case : List[Any] = mask_feature_length
        __snake_case : List[Any] = mask_feature_min_masks

        # ctc loss
        __snake_case : int = ctc_loss_reduction
        __snake_case : Optional[Any] = ctc_zero_infinity

        # adapter
        __snake_case : str = add_adapter
        __snake_case : List[str] = adapter_kernel_size
        __snake_case : Optional[Any] = adapter_stride
        __snake_case : int = num_adapter_layers
        __snake_case : int = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __snake_case : List[str] = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        __snake_case : Tuple = list(UpperCAmelCase )
        __snake_case : Dict = list(UpperCAmelCase )
        __snake_case : Dict = list(UpperCAmelCase )
        __snake_case : Union[str, Any] = xvector_output_dim

    @property
    def UpperCAmelCase ( self ) -> List[Any]:
        '''simple docstring'''
        return math.prod(self.conv_stride )
code_codestyle: 326

style_context:
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : str , lowercase : List[Any] , lowercase : List[str] ) -> int:
    if index == r:
        for j in range(lowercase ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    __snake_case : Union[str, Any] = arr[i]
    combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase__( lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] ) -> Optional[Any]:
    # A temporary array to store all combination one by one
    __snake_case : Tuple = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )


if __name__ == "__main__":
    # Driver code to check the function above
    _UpperCamelCase = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu

style_context_codestyle: 326

label: 1
code:
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_UpperCamelCase = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ['''YolosFeatureExtractor''']
    _UpperCamelCase = ['''YolosImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''YolosForObjectDetection''',
        '''YolosModel''',
        '''YolosPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 326

style_context:
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = [
    ('''bert.bert''', '''visual_bert'''),
    ('''bert.cls''', '''cls'''),
    ('''bert.classifier''', '''cls'''),
    ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
    ('''position_embeddings_visual''', '''visual_position_embeddings'''),
    ('''projection''', '''visual_projection'''),
]

_UpperCamelCase = [
    '''nlvr2_coco_pre_trained.th''',
    '''nlvr2_fine_tuned.th''',
    '''nlvr2_pre_trained.th''',
    '''vcr_coco_pre_train.th''',
    '''vcr_fine_tune.th''',
    '''vcr_pre_train.th''',
    '''vqa_coco_pre_trained.th''',
    '''vqa_fine_tuned.th''',
    '''vqa_pre_trained.th''',
]


def lowerCAmelCase__( lowercase : str ) -> Optional[Any]:
    __snake_case : Optional[int] = torch.load(lowercase , map_location="cpu" )
    return sd


def lowerCAmelCase__( lowercase : List[Any] , lowercase : List[Any] , lowercase : List[Any]=rename_keys_prefix ) -> Dict:
    __snake_case : Tuple = OrderedDict()
    __snake_case : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )

    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        __snake_case : Optional[Any] = key
        for name_pair in rename_keys_prefix:
            __snake_case : List[str] = new_key.replace(name_pair[0] , name_pair[1] )
        __snake_case : List[str] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            __snake_case : List[Any] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : Any ) -> List[Any]:
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        __snake_case : Any = "pretraining"
        if "vcr" in checkpoint_path:
            __snake_case : Optional[Any] = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            __snake_case : Tuple = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            __snake_case : Dict = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            __snake_case : Any = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            __snake_case : Dict = {"visual_embedding_dim": 512}
            __snake_case : Any = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            __snake_case : List[Any] = {"visual_embedding_dim": 2048}
            __snake_case : Optional[Any] = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            __snake_case : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
            __snake_case : Union[str, Any] = "vqa"
        elif "nlvr" in checkpoint_path:
            __snake_case : Tuple = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            __snake_case : List[Any] = "nlvr"

    __snake_case : Union[str, Any] = VisualBertConfig(**lowercase )

    # Load State Dict
    __snake_case : Any = load_state_dict(lowercase )

    __snake_case : Dict = get_new_dict(lowercase , lowercase )

    if model_type == "pretraining":
        __snake_case : Optional[Any] = VisualBertForPreTraining(lowercase )
    elif model_type == "vqa":
        __snake_case : Tuple = VisualBertForQuestionAnswering(lowercase )
    elif model_type == "nlvr":
        __snake_case : Tuple = VisualBertForVisualReasoning(lowercase )
    elif model_type == "multichoice":
        __snake_case : List[Any] = VisualBertForMultipleChoice(lowercase )

    model.load_state_dict(lowercase )

    # Save Checkpoints
    Path(lowercase ).mkdir(exist_ok=lowercase )
    model.save_pretrained(lowercase )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    _UpperCamelCase = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)

style_context_codestyle: 326

label: 1
code:
import inspect
import unittest
import warnings

from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class _lowerCamelCase :
    """simple docstring"""

    def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=30 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=None , UpperCAmelCase=2 , ) -> Dict:
        '''simple docstring'''
        __snake_case : int = parent
        __snake_case : Dict = batch_size
        __snake_case : str = image_size
        __snake_case : Union[str, Any] = patch_size
        __snake_case : Tuple = num_channels
        __snake_case : Dict = is_training
        __snake_case : Dict = use_labels
        __snake_case : Tuple = hidden_size
        __snake_case : List[Any] = num_hidden_layers
        __snake_case : Any = num_attention_heads
        __snake_case : List[str] = intermediate_size
        __snake_case : Optional[int] = hidden_act
        __snake_case : int = hidden_dropout_prob
        __snake_case : List[Any] = attention_probs_dropout_prob
        __snake_case : Union[str, Any] = type_sequence_label_size
        __snake_case : Dict = initializer_range
        __snake_case : List[Any] = scope
        __snake_case : int = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        __snake_case : Optional[int] = (image_size // patch_size) ** 2
        __snake_case : List[str] = num_patches + 2

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        __snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __snake_case : Tuple = None
        if self.use_labels:
            __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        __snake_case : Union[str, Any] = self.get_config()

        return config, pixel_values, labels

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=UpperCAmelCase ,
            initializer_range=self.initializer_range ,
            encoder_stride=self.encoder_stride ,
        )

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
        '''simple docstring'''
        __snake_case : Dict = DeiTModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        __snake_case : Dict = model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
        '''simple docstring'''
        __snake_case : str = DeiTForMaskedImageModeling(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        __snake_case : str = model(UpperCAmelCase )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        __snake_case : List[str] = 1
        __snake_case : Dict = DeiTForMaskedImageModeling(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()

        __snake_case : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case : str = model(UpperCAmelCase )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
        '''simple docstring'''
        __snake_case : Optional[Any] = self.type_sequence_label_size
        __snake_case : Any = DeiTForImageClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        __snake_case : Optional[Any] = model(UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        __snake_case : List[Any] = 1
        __snake_case : Dict = DeiTForImageClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()

        __snake_case : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case : List[str] = model(UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def UpperCAmelCase ( self ) -> int:
        '''simple docstring'''
        __snake_case : List[Any] = self.prepare_config_and_inputs()
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : str = config_and_inputs
        __snake_case : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class _lowerCamelCase ( a , a , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ : List[Any] =(
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    UpperCAmelCase_ : List[str] =(
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    UpperCAmelCase_ : Optional[int] =False
    UpperCAmelCase_ : List[Any] =False
    UpperCAmelCase_ : List[str] =False

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        __snake_case : List[Any] = DeiTModelTester(self )
        __snake_case : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )

    def UpperCAmelCase ( self ) -> str:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds" )
    def UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        pass

    def UpperCAmelCase ( self ) -> Optional[int]:
        '''simple docstring'''
        __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __snake_case : Dict = model_class(UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __snake_case : int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )

    def UpperCAmelCase ( self ) -> List[str]:
        '''simple docstring'''
        __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __snake_case : Optional[Any] = model_class(UpperCAmelCase )
            __snake_case : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : List[str] = [*signature.parameters.keys()]

            __snake_case : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCAmelCase )

    def UpperCAmelCase ( self ) -> List[str]:
        '''simple docstring'''
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )

    def UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        __snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )

    def UpperCAmelCase ( self ) -> Tuple:
        '''simple docstring'''
        __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Optional[Any]:
        '''simple docstring'''
        __snake_case : Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def UpperCAmelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        if not self.model_tester.is_training:
            return

        __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __snake_case : str = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(UpperCAmelCase )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            __snake_case : str = model_class(UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.train()
            __snake_case : Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
            __snake_case : Dict = model(**UpperCAmelCase ).loss
            loss.backward()

    def UpperCAmelCase ( self ) -> Dict:
        '''simple docstring'''
        __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        __snake_case : str = False
        __snake_case : Any = True

        for model_class in self.all_model_classes:
            if model_class in get_values(UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            __snake_case : Dict = model_class(UpperCAmelCase )
            model.gradient_checkpointing_enable()
            model.to(UpperCAmelCase )
            model.train()
            __snake_case : List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
            __snake_case : Optional[int] = model(**UpperCAmelCase ).loss
            loss.backward()

    def UpperCAmelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        __snake_case : List[Any] = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(UpperCAmelCase ),
                    *get_values(UpperCAmelCase ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
                    __snake_case : Optional[Any] = problem_type["title"]
                    __snake_case : List[str] = problem_type["num_labels"]

                    __snake_case : str = model_class(UpperCAmelCase )
                    model.to(UpperCAmelCase )
                    model.train()

                    __snake_case : Dict = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )

                    if problem_type["num_labels"] > 1:
                        __snake_case : Any = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )

                    __snake_case : Tuple = inputs["labels"].to(problem_type["dtype"] )

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=UpperCAmelCase ) as warning_list:
                        __snake_case : Tuple = model(**UpperCAmelCase ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )

                    loss.backward()

    @slow
    def UpperCAmelCase ( self ) -> Optional[int]:
        '''simple docstring'''
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : str = DeiTModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )


def lowerCAmelCase__( ) -> List[Any]:
    __snake_case : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def UpperCAmelCase ( self ) -> Tuple:
        '''simple docstring'''
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCAmelCase ( self ) -> Tuple:
        '''simple docstring'''
        __snake_case : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
            UpperCAmelCase )

        __snake_case : Any = self.default_image_processor
        __snake_case : List[Any] = prepare_img()
        __snake_case : Any = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )

        # forward pass
        with torch.no_grad():
            __snake_case : str = model(**UpperCAmelCase )

        # verify the logits
        __snake_case : List[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase )

        __snake_case : Any = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCAmelCase )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def UpperCAmelCase ( self ) -> List[Any]:
        '''simple docstring'''
        __snake_case : Optional[Any] = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
        __snake_case : Optional[int] = self.default_image_processor
        __snake_case : Optional[int] = prepare_img()
        __snake_case : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="pt" )
        __snake_case : Tuple = inputs.pixel_values.to(UpperCAmelCase )

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            __snake_case : int = model(UpperCAmelCase )
code_codestyle: 326

style_context:
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple:
    # Load configuration defined in the metadata file
    with open(lowercase ) as metadata_file:
        __snake_case : int = json.load(lowercase )
    __snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )

    # Load in the weights from the checkpoint_path
    __snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"]

    # Load the entity vocab file
    __snake_case : Tuple = load_original_entity_vocab(lowercase )
    # add an entry for [MASK2]
    __snake_case : Optional[int] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1

    __snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )

    # Add special tokens to the token vocabulary for downstream tasks
    __snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
    __snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2

    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(lowercase )

    with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
        __snake_case : Tuple = json.load(lowercase )
    __snake_case : List[Any] = "MLukeTokenizer"
    with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(lowercase , lowercase )

    with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(lowercase , lowercase )

    __snake_case : Any = MLukeTokenizer.from_pretrained(lowercase )

    # Initialize the embeddings of the special tokens
    __snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0]
    __snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0]

    __snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"]
    __snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 )
    __snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
    __snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        __snake_case : List[Any] = state_dict[bias_name]
        __snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
        __snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 )
        __snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            __snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self."""
            __snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
            __snake_case : str = state_dict[prefix + matrix_name]
            __snake_case : Union[str, Any] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    __snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
    __snake_case : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    __snake_case : Any = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    __snake_case : List[Any] = state_dict["entity_predictions.bias"]
    __snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    __snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )

    __snake_case : Any = LukeForMaskedLM(config=lowercase ).eval()

    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    __snake_case : int = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            __snake_case : str = state_dict[key]
        else:
            __snake_case : str = state_dict[key]

    __snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase )

    if set(lowercase ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
    if set(lowercase ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    __snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )

    __snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    __snake_case : Union[str, Any] = (0, 9)
    __snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )

    __snake_case : Any = model(**lowercase )

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        __snake_case : Optional[Any] = torch.Size((1, 33, 768) )
        __snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        __snake_case : str = torch.Size((1, 1, 768) )
        __snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
        raise ValueError

    # Verify masked word/entity prediction
    __snake_case : str = MLukeTokenizer.from_pretrained(lowercase )
    __snake_case : Dict = "Tokyo is the capital of <mask>."
    __snake_case : Union[str, Any] = (24, 30)
    __snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )

    __snake_case : int = model(**lowercase )

    __snake_case : Dict = encoding["input_ids"][0].tolist()
    __snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    __snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(lowercase )

    __snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
    __snake_case : Optional[int] = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(lowercase ) )
    model.save_pretrained(lowercase )


def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]:
    __snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"]

    __snake_case : Any = [json.loads(lowercase ) for line in open(lowercase )]

    __snake_case : Any = {}
    for entry in data:
        __snake_case : Any = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                __snake_case : Optional[int] = entity_id
                break
            __snake_case : Union[str, Any] = f"""{language}:{entity_name}"""
            __snake_case : Any = entity_id
    return new_mapping


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    _UpperCamelCase = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )

style_context_codestyle: 326

label: 1
code:
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {'''vocab_file''': '''vocab.txt'''}

_UpperCamelCase = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}

_UpperCamelCase = {
    '''YituTech/conv-bert-base''': 512,
    '''YituTech/conv-bert-medium-small''': 512,
    '''YituTech/conv-bert-small''': 512,
}

_UpperCamelCase = {
    '''YituTech/conv-bert-base''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}


class _lowerCamelCase ( a ):
    """simple docstring"""

    UpperCAmelCase_ : int =VOCAB_FILES_NAMES
    UpperCAmelCase_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ : Tuple =PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ : List[Any] =ConvBertTokenizer

    def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase="[UNK]" , UpperCAmelCase="[SEP]" , UpperCAmelCase="[PAD]" , UpperCAmelCase="[CLS]" , UpperCAmelCase="[MASK]" , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ) -> List[str]:
        '''simple docstring'''
        super().__init__(
            UpperCAmelCase ,
            tokenizer_file=UpperCAmelCase ,
            do_lower_case=UpperCAmelCase ,
            unk_token=UpperCAmelCase ,
            sep_token=UpperCAmelCase ,
            pad_token=UpperCAmelCase ,
            cls_token=UpperCAmelCase ,
            mask_token=UpperCAmelCase ,
            tokenize_chinese_chars=UpperCAmelCase ,
            strip_accents=UpperCAmelCase ,
            **UpperCAmelCase ,
        )

        __snake_case : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
            or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
        ):
            __snake_case : List[Any] = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
            __snake_case : Optional[int] = do_lower_case
            __snake_case : Dict = strip_accents
            __snake_case : Tuple = tokenize_chinese_chars
            __snake_case : Dict = normalizer_class(**UpperCAmelCase )

        __snake_case : List[str] = do_lower_case

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> int:
        '''simple docstring'''
        __snake_case : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
        '''simple docstring'''
        __snake_case : int = [self.sep_token_id]
        __snake_case : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
        '''simple docstring'''
        __snake_case : int = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
        return tuple(UpperCAmelCase )
code_codestyle: 326

style_context:
from maths.prime_factors import prime_factors


def lowerCAmelCase__( lowercase : int ) -> int:
    if not isinstance(lowercase , lowercase ):
        __snake_case : Optional[int] = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(lowercase )
    if number < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(lowercase ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

style_context_codestyle: 326

label: 1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCamelCase : """simple docstring""" @staticmethod def UpperCAmelCase ( *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]: '''simple docstring''' pass @is_pipeline_test @require_vision class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @require_torch def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : str = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) __snake_case : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __snake_case : Dict = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCAmelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) __snake_case : str = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @require_tf def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : List[str] = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) __snake_case : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __snake_case : Dict = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) __snake_case : Optional[Any] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 
0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @slow @require_torch def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes __snake_case : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __snake_case : str = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) __snake_case : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : int = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes __snake_case : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) __snake_case : Tuple = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) __snake_case : Optional[int] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
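# --- Sketch: what shift_tokens_right does (a NumPy re-statement, not the
# library function). Decoder inputs for teacher forcing are the labels
# shifted one position to the right, with the decoder start token prepended
# and any -100 loss-masking sentinels mapped back to the pad token.
import numpy as np

def shift_tokens_right_sketch(labels: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)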
import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class _lowerCamelCase ( a ): """simple docstring""" def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Any = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCAmelCase , "hidden_sizes" ) ) self.parent.assertTrue(hasattr(UpperCAmelCase , "num_attention_heads" ) ) self.parent.assertTrue(hasattr(UpperCAmelCase , "num_encoder_blocks" ) ) class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=64 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[2, 2, 2, 2] , UpperCAmelCase=[8, 4, 2, 1] , UpperCAmelCase=[16, 32, 64, 128] , UpperCAmelCase=[1, 4, 8, 16] , UpperCAmelCase=[1, 2, 4, 8] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=None , ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = parent __snake_case : List[str] = batch_size __snake_case : int = image_size __snake_case : Optional[int] = num_channels __snake_case : Optional[Any] = num_encoder_blocks __snake_case : Tuple = sr_ratios __snake_case : Union[str, Any] = depths __snake_case : Optional[int] = hidden_sizes __snake_case : Optional[int] = downsampling_rates __snake_case : int = num_attention_heads __snake_case : Tuple = is_training __snake_case : List[str] = use_labels __snake_case : Optional[int] = hidden_act __snake_case : int = hidden_dropout_prob __snake_case : str = attention_probs_dropout_prob __snake_case : Union[str, Any] = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Dict = scope def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Optional[Any] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self ) -> int: '''simple docstring''' return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : Dict = SegformerModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) 
model.eval() __snake_case : List[Any] = model(UpperCAmelCase ) __snake_case : List[str] = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : int = self.num_labels __snake_case : List[str] = SegformerForSemanticSegmentation(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : str = model(UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) __snake_case : str = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = 1 __snake_case : Tuple = SegformerForSemanticSegmentation(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : Optional[Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCAmelCase ) __snake_case : Tuple = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertGreater(result.loss , 0.0 ) def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Dict = config_and_inputs __snake_case : str = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _lowerCamelCase ( a , a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) UpperCAmelCase_ : Any =( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase_ : Optional[Any] =True UpperCAmelCase_ : Optional[int] =False UpperCAmelCase_ : Tuple =False UpperCAmelCase_ : Any =False def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : List[Any] = SegformerModelTester(self ) __snake_case : List[str] = SegformerConfigTester(self , config_class=UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*UpperCAmelCase ) @unittest.skip("SegFormer does not use inputs_embeds" ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" ) def UpperCAmelCase ( self ) -> 
Union[str, Any]: '''simple docstring''' pass def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(UpperCAmelCase ) __snake_case : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Tuple = [*signature.parameters.keys()] __snake_case : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Dict = True for model_class in self.all_model_classes: __snake_case : Tuple = True __snake_case : Any = False __snake_case : List[str] = True __snake_case : Optional[Any] = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): __snake_case : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) __snake_case : Any = outputs.attentions __snake_case : Union[str, Any] = sum(self.model_tester.depths ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case : Tuple = True __snake_case : Any = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): __snake_case : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) __snake_case : Union[str, Any] = outputs.attentions self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) # verify the first attentions (first block, first layer) __snake_case : Dict = (self.model_tester.image_size // 4) ** 2 __snake_case : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) __snake_case : str = (self.model_tester.image_size // 32) ** 2 __snake_case : Optional[Any] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) __snake_case : Optional[Any] = len(UpperCAmelCase ) # Check attention is always last and order is fine __snake_case : Dict = True __snake_case : Dict = True __snake_case : List[str] = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): __snake_case : Tuple = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(out_len + 1 , len(UpperCAmelCase ) ) __snake_case : int = outputs.attentions self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) # verify the first attentions (first block, first layer) __snake_case : Optional[int] = (self.model_tester.image_size // 4) ** 2 __snake_case : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): __snake_case : Dict = 
model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): __snake_case : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) __snake_case : Any = outputs.hidden_states __snake_case : str = self.model_tester.num_encoder_blocks self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[Any] = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Tuple = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' if not self.model_tester.is_training: return __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Optional[int] = True for model_class in self.all_model_classes: if model_class in get_values(UpperCAmelCase ): continue __snake_case : List[str] = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() __snake_case : List[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) __snake_case : Optional[Any] = model(**UpperCAmelCase ).loss loss.backward() @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass @slow def UpperCAmelCase ( self ) -> int: '''simple docstring''' for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Dict = SegformerModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def lowerCAmelCase__( ) -> Dict: __snake_case : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : str = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCAmelCase , align=UpperCAmelCase , do_random_crop=UpperCAmelCase ) __snake_case : List[str] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCAmelCase ) __snake_case : Dict = prepare_img() __snake_case : Optional[Any] = image_processor(images=UpperCAmelCase , return_tensors="pt" ) __snake_case : List[Any] = encoded_inputs.pixel_values.to(UpperCAmelCase ) with torch.no_grad(): __snake_case : List[Any] = model(UpperCAmelCase ) __snake_case : Optional[int] = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) __snake_case : Optional[int] = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase , atol=1E-4 ) ) @slow def 
UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCAmelCase , align=UpperCAmelCase , do_random_crop=UpperCAmelCase ) __snake_case : List[Any] = SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCAmelCase ) __snake_case : int = prepare_img() __snake_case : Union[str, Any] = image_processor(images=UpperCAmelCase , return_tensors="pt" ) __snake_case : Dict = encoded_inputs.pixel_values.to(UpperCAmelCase ) with torch.no_grad(): __snake_case : int = model(UpperCAmelCase ) __snake_case : List[str] = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) __snake_case : Optional[int] = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase , atol=1E-1 ) ) @slow def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=UpperCAmelCase , align=UpperCAmelCase , do_random_crop=UpperCAmelCase ) __snake_case : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to( UpperCAmelCase ) __snake_case : str = prepare_img() __snake_case : Any = image_processor(images=UpperCAmelCase , return_tensors="pt" ) __snake_case : List[str] = encoded_inputs.pixel_values.to(UpperCAmelCase ) with torch.no_grad(): __snake_case : str = model(UpperCAmelCase ) __snake_case : int = outputs.logits.detach().cpu() __snake_case : Any = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase , target_sizes=[(500, 300)] ) __snake_case : int = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCAmelCase ) __snake_case : List[str] = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase ) __snake_case : List[Any] = torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , UpperCAmelCase )
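# --- Illustrative inference sketch (checkpoint and image path come from the
# slow tests above; the rest is a minimal, hedged usage example).
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Logits come out at 1/4 of the input resolution (as the tests assert);
# post-processing upsamples them and takes the per-pixel argmax.
# PIL's .size is (width, height), so reverse it for (height, width).
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)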
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
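# --- Illustrative smoke-test sketch (not part of the diffusers module above;
# the module's relative imports mean it only runs inside the package).
# CLIPVisionConfig defaults (hidden_size=768, 12 layers, 224x224 inputs) are
# stand-in values, not a real Paint-by-Example checkpoint.
if __name__ == "__main__":
    from transformers import CLIPVisionConfig

    config = CLIPVisionConfig()
    encoder = PaintByExampleImageEncoder(config, proj_size=768)
    pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
    # latent_states: (1, 1, 768) conditioning embedding; uncond_vector: the
    # learned unconditional embedding used for classifier-free guidance.
    latent_states, uncond_vector = encoder(pixel_values, return_uncond_vector=True)
    print(latent_states.shape, uncond_vector.shape)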
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa _UpperCamelCase = logging.getLogger(__name__) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : str ="summarization" UpperCAmelCase_ : Any =["loss"] UpperCAmelCase_ : int =ROUGE_KEYS UpperCAmelCase_ : Any ="rouge2" def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> Tuple: '''simple docstring''' if hparams.sortish_sampler and hparams.gpus > 1: __snake_case : str = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" ) if hparams.sortish_sampler: raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" ) super().__init__(UpperCAmelCase , num_labels=UpperCAmelCase , mode=self.mode , **UpperCAmelCase ) use_task_specific_params(self.model , "summarization" ) save_git_info(self.hparams.output_dir ) __snake_case : int = Path(self.output_dir ) / "metrics.json" __snake_case : int = Path(self.output_dir ) / "hparams.pkl" pickle_save(self.hparams , self.hparams_save_path ) __snake_case : Any = 0 __snake_case : Any = defaultdict(UpperCAmelCase ) __snake_case : int = self.config.model_type __snake_case : Dict = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size __snake_case : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } __snake_case : Optional[int] = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } __snake_case : Dict = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} __snake_case : int = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}""" assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}""" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) __snake_case : Union[str, Any] = get_git_info()["repo_sha"] __snake_case : List[Any] = hparams.num_workers __snake_case : int = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCAmelCase ): __snake_case : int = self.tokenizer.lang_code_to_id[hparams.tgt_lang] __snake_case : Optional[int] = 
self.decoder_start_token_id __snake_case : Any = ( SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset ) __snake_case : int = False __snake_case : Optional[int] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: __snake_case : int = self.hparams.eval_max_gen_length else: __snake_case : Tuple = self.model.config.max_length __snake_case : int = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict[str, List[str]]: '''simple docstring''' __snake_case : Optional[int] = { k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items() } save_json(UpperCAmelCase , Path(self.output_dir ) / "text_batch.json" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" ) __snake_case : Union[str, Any] = True return readable_batch def UpperCAmelCase ( self , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.model(UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __snake_case : str = self.tokenizer.batch_decode( UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) return lmap(str.strip , UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple: '''simple docstring''' __snake_case : Dict = self.tokenizer.pad_token_id __snake_case , __snake_case : int = batch["input_ids"], batch["attention_mask"] __snake_case : int = batch["labels"] if isinstance(self.model , UpperCAmelCase ): __snake_case : List[str] = self.model._shift_right(UpperCAmelCase ) else: __snake_case : Optional[int] = shift_tokens_right(UpperCAmelCase , UpperCAmelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero __snake_case : List[Any] = decoder_input_ids self.save_readable_batch(UpperCAmelCase ) __snake_case : Union[str, Any] = self(UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , use_cache=UpperCAmelCase ) __snake_case : Any = outputs["logits"] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id __snake_case : Tuple = nn.CrossEntropyLoss(ignore_index=UpperCAmelCase ) assert lm_logits.shape[-1] == self.vocab_size __snake_case : str = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: __snake_case : Optional[int] = nn.functional.log_softmax(UpperCAmelCase , dim=-1 ) __snake_case , __snake_case : Tuple = label_smoothed_nll_loss( UpperCAmelCase , UpperCAmelCase , self.hparams.label_smoothing , ignore_index=UpperCAmelCase ) return (loss,) @property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return self.tokenizer.pad_token_id def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Dict: '''simple docstring''' __snake_case : Any = self._step(UpperCAmelCase ) __snake_case : int = dict(zip(self.loss_names , UpperCAmelCase ) ) # tokens per batch __snake_case : str = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum() __snake_case : Union[str, Any] = batch["input_ids"].shape[0] __snake_case : Any = batch["input_ids"].eq(self.pad ).sum() __snake_case : str = batch["input_ids"].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": 
loss_tensors[0], "log": logs} def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Dict: '''simple docstring''' return self._generative_step(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase="val" ) -> Dict: '''simple docstring''' self.step_count += 1 __snake_case : Optional[int] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} __snake_case : str = losses["loss"] __snake_case : Optional[int] = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"] } __snake_case : Any = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) __snake_case : torch.FloatTensor = torch.tensor(UpperCAmelCase ).type_as(UpperCAmelCase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(UpperCAmelCase ) __snake_case : Optional[int] = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()} __snake_case : int = self.step_count self.metrics[prefix].append(UpperCAmelCase ) # callback writes this to self.metrics_save_path __snake_case : Dict = flatten_list([x["preds"] for x in outputs] ) return { "log": all_metrics, "preds": preds, F"""{prefix}_loss""": loss, F"""{prefix}_{self.val_metric}""": metric_tensor, } def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Dict: '''simple docstring''' return calculate_rouge(UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> dict: '''simple docstring''' __snake_case : Optional[int] = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') __snake_case : Optional[Any] = self.model.generate( batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=UpperCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) __snake_case : Optional[int] = (time.time() - ta) / batch["input_ids"].shape[0] __snake_case : List[str] = self.ids_to_clean_text(UpperCAmelCase ) __snake_case : List[str] = self.ids_to_clean_text(batch["labels"] ) __snake_case : List[Any] = self._step(UpperCAmelCase ) __snake_case : List[Any] = dict(zip(self.loss_names , UpperCAmelCase ) ) __snake_case : Dict = self.calc_generative_metrics(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = np.mean(lmap(UpperCAmelCase , UpperCAmelCase ) ) base_metrics.update(gen_time=UpperCAmelCase , gen_len=UpperCAmelCase , preds=UpperCAmelCase , target=UpperCAmelCase , **UpperCAmelCase ) return base_metrics def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return self._generative_step(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' return self.validation_epoch_end(UpperCAmelCase , prefix="test" ) def UpperCAmelCase ( self , UpperCAmelCase ) -> SeqaSeqDataset: '''simple docstring''' __snake_case : Optional[Any] = self.n_obs[type_path] __snake_case : Optional[int] = self.target_lens[type_path] __snake_case : Any = self.dataset_class( self.tokenizer , type_path=UpperCAmelCase , n_obs=UpperCAmelCase , max_target_length=UpperCAmelCase , **self.dataset_kwargs , ) return dataset def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> DataLoader: '''simple docstring''' __snake_case : List[str] = self.get_dataset(UpperCAmelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": __snake_case : 
Union[str, Any] = dataset.make_sortish_sampler(UpperCAmelCase , distributed=self.hparams.gpus > 1 ) return DataLoader( UpperCAmelCase , batch_size=UpperCAmelCase , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase , num_workers=self.num_workers , sampler=UpperCAmelCase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": __snake_case : Union[str, Any] = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( UpperCAmelCase , batch_sampler=UpperCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( UpperCAmelCase , batch_size=UpperCAmelCase , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase , num_workers=self.num_workers , sampler=UpperCAmelCase , ) def UpperCAmelCase ( self ) -> DataLoader: '''simple docstring''' __snake_case : str = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=UpperCAmelCase ) return dataloader def UpperCAmelCase ( self ) -> DataLoader: '''simple docstring''' return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size ) def UpperCAmelCase ( self ) -> DataLoader: '''simple docstring''' return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size ) @staticmethod def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' BaseTransformer.add_model_specific_args(UpperCAmelCase , UpperCAmelCase ) add_generic_args(UpperCAmelCase , UpperCAmelCase ) parser.add_argument( "--max_source_length" , default=1024 , type=UpperCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--max_target_length" , default=56 , type=UpperCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--val_max_target_length" , default=142 , type=UpperCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--test_max_target_length" , default=142 , type=UpperCAmelCase , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument("--freeze_encoder" , action="store_true" ) parser.add_argument("--freeze_embeds" , action="store_true" ) parser.add_argument("--sortish_sampler" , action="store_true" , default=UpperCAmelCase ) parser.add_argument("--overwrite_output_dir" , action="store_true" , default=UpperCAmelCase ) parser.add_argument("--max_tokens_per_batch" , type=UpperCAmelCase , default=UpperCAmelCase ) parser.add_argument("--logger_name" , type=UpperCAmelCase , choices=["default", "wandb", "wandb_shared"] , default="default" ) parser.add_argument("--n_train" , type=UpperCAmelCase , default=-1 , required=UpperCAmelCase , help="# examples. -1 means use all." ) parser.add_argument("--n_val" , type=UpperCAmelCase , default=500 , required=UpperCAmelCase , help="# examples. -1 means use all." ) parser.add_argument("--n_test" , type=UpperCAmelCase , default=-1 , required=UpperCAmelCase , help="# examples. -1 means use all." 
) parser.add_argument( "--task" , type=UpperCAmelCase , default="summarization" , required=UpperCAmelCase , help="# examples. -1 means use all." ) parser.add_argument("--label_smoothing" , type=UpperCAmelCase , default=0.0 , required=UpperCAmelCase ) parser.add_argument("--src_lang" , type=UpperCAmelCase , default="" , required=UpperCAmelCase ) parser.add_argument("--tgt_lang" , type=UpperCAmelCase , default="" , required=UpperCAmelCase ) parser.add_argument("--eval_beams" , type=UpperCAmelCase , default=UpperCAmelCase , required=UpperCAmelCase ) parser.add_argument( "--val_metric" , type=UpperCAmelCase , default=UpperCAmelCase , required=UpperCAmelCase , choices=["bleu", "rouge2", "loss", None] ) parser.add_argument("--eval_max_gen_length" , type=UpperCAmelCase , default=UpperCAmelCase , help="never generate more than n tokens" ) parser.add_argument("--save_top_k" , type=UpperCAmelCase , default=1 , required=UpperCAmelCase , help="How many checkpoints to save" ) parser.add_argument( "--early_stopping_patience" , type=UpperCAmelCase , default=-1 , required=UpperCAmelCase , help=( "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So" " val_check_interval will effect it." ) , ) return parser class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : int ="translation" UpperCAmelCase_ : str =["loss"] UpperCAmelCase_ : Union[str, Any] =["bleu"] UpperCAmelCase_ : Union[str, Any] ="bleu" def __init__( self , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' super().__init__(UpperCAmelCase , **UpperCAmelCase ) __snake_case : Optional[int] = hparams.src_lang __snake_case : Optional[int] = hparams.tgt_lang def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> dict: '''simple docstring''' return calculate_bleu(UpperCAmelCase , UpperCAmelCase ) def lowerCAmelCase__( lowercase : Any , lowercase : Union[str, Any]=None ) -> SummarizationModule: Path(args.output_dir ).mkdir(exist_ok=lowercase ) check_output_dir(lowercase , expected_items=3 ) if model is None: if "summarization" in args.task: __snake_case : SummarizationModule = SummarizationModule(lowercase ) else: __snake_case : SummarizationModule = TranslationModule(lowercase ) __snake_case : Any = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("/tmp" ) or str(args.output_dir ).startswith("/var" ) ): __snake_case : Union[str, Any] = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger __snake_case : List[str] = os.environ.get("WANDB_PROJECT" , lowercase ) __snake_case : str = WandbLogger(name=model.output_dir.name , project=lowercase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger __snake_case : Union[str, Any] = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" ) if args.early_stopping_patience >= 0: __snake_case : Dict = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: __snake_case : Optional[int] = False __snake_case : Dict = args.val_metric == "loss" __snake_case : pl.Trainer = generic_train( lowercase , lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , lowercase ) , early_stopping_callback=lowercase , logger=lowercase , ) pickle_save(model.hparams , model.output_dir / "hparams.pkl" ) if not args.do_predict: 
return model __snake_case : Tuple = "" __snake_case : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=lowercase ) ) if checkpoints: __snake_case : Tuple = checkpoints[-1] __snake_case : Optional[Any] = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() _UpperCamelCase = pl.Trainer.add_argparse_args(parser) _UpperCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd()) _UpperCamelCase = parser.parse_args() main(args)
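# --- Illustrative invocation (sketch). The flags from --task downward come
# from add_model_specific_args above; --data_dir, --output_dir, --gpus and
# --do_predict are referenced by main(), while --model_name_or_path and the
# script filename are assumptions inherited from lightning_base.
#
#   python finetune.py \
#     --data_dir ./cnn_dm \
#     --output_dir ./summarization_runs \
#     --model_name_or_path facebook/bart-large \
#     --task summarization \
#     --max_source_length 1024 --max_target_length 56 --val_max_target_length 142 \
#     --n_val 500 --val_metric rouge2 --eval_beams 4 \
#     --freeze_embeds --sortish_sampler --gpus 1 --do_predict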
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
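# --- Illustrative usage sketch. The checkpoint name is an assumption
# ("kakaobrain/karlo-v1-alpha" is the public unCLIP release); a CUDA device
# is assumed for the fp16 variant.
import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of a red panda").images[0]
image.save("red_panda.png")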
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = torch.device('''cpu''') def lowerCAmelCase__( ) -> Any: __snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]: __snake_case : List[Any] = dct.pop(lowercase ) __snake_case : List[Any] = val def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple: __snake_case : Optional[Any] = [] for k in state_dict.keys(): __snake_case : Union[str, Any] = k if ".pwconv" in k: __snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: __snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: __snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: __snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: __snake_case : int = k_new.split("." ) if ls[2].isdigit(): __snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: __snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]: __snake_case : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __snake_case : Tuple = 1000 __snake_case : Any = "huggingface/label-files" __snake_case : int = "imagenet-1k-id2label.json" __snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) __snake_case : str = {int(lowercase ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __snake_case : Optional[Any] = [3, 3, 6, 4] __snake_case : Optional[int] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __snake_case : List[str] = [3, 3, 9, 6] __snake_case : Optional[Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __snake_case : Optional[int] = [4, 3, 10, 5] __snake_case : Dict = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __snake_case : str = [4, 4, 12, 6] __snake_case : Optional[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): __snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase ) else: __snake_case : Tuple = torch.load(lowercase , map_location="cpu" ) __snake_case : Optional[int] = checkpoint __snake_case : Any = create_rename_keys(lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) # load HuggingFace model __snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval() hf_model.load_state_dict(lowercase ) # prepare test inputs __snake_case : Optional[Any] = prepare_img() __snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" ) __snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" ) # compare outputs from both models __snake_case : str = get_expected_output(lowercase ) __snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _UpperCamelCase = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[int] =["input_values", "attention_mask"] def __init__( self , UpperCAmelCase = 1 , UpperCAmelCase = 16000 , UpperCAmelCase = 0.0 , UpperCAmelCase = False , UpperCAmelCase = 80 , UpperCAmelCase = 16 , UpperCAmelCase = 64 , UpperCAmelCase = "hann_window" , UpperCAmelCase = 1.0 , UpperCAmelCase = 80 , UpperCAmelCase = 7600 , UpperCAmelCase = 1E-10 , UpperCAmelCase = 2 , UpperCAmelCase = True , **UpperCAmelCase , ) -> Tuple: '''simple docstring''' super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase ) __snake_case : Union[str, Any] = do_normalize __snake_case : Any = return_attention_mask __snake_case : Union[str, Any] = num_mel_bins __snake_case : Optional[int] = hop_length __snake_case : Optional[Any] = win_length __snake_case : int = win_function __snake_case : Optional[Any] = frame_signal_scale __snake_case : List[Any] = fmin __snake_case : Any = fmax __snake_case : List[Any] = mel_floor __snake_case : Optional[Any] = reduction_factor __snake_case : Dict = win_length * sampling_rate // 1000 __snake_case : int = hop_length * sampling_rate // 1000 __snake_case : Union[str, Any] = optimal_fft_length(self.sample_size ) __snake_case : Any = (self.n_fft // 2) + 1 __snake_case : List[str] = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase ) __snake_case : Dict = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , ) if frame_signal_scale != 1.0: warnings.warn( "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: __snake_case : Any = np.array(UpperCAmelCase , np.intaa ) __snake_case : List[Any] = [] for vector, length in zip(UpperCAmelCase , attention_mask.sum(-1 ) ): __snake_case : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: __snake_case : Dict = padding_value normed_input_values.append(UpperCAmelCase ) else: __snake_case : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def UpperCAmelCase ( self , UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __snake_case : Union[str, Any] = spectrogram( UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" 
, ) return log_mel_spec.T def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> BatchFeature: '''simple docstring''' if audio is None and audio_target is None: raise ValueError("You must provide either `audio` or `audio_target` values." ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if audio is not None: __snake_case : Union[str, Any] = self._process_audio( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ) else: __snake_case : Any = None if audio_target is not None: __snake_case : Dict = self._process_audio( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ) if inputs is None: return inputs_target else: __snake_case : Any = inputs_target["input_values"] __snake_case : Union[str, Any] = inputs_target.get("attention_mask" ) if decoder_attention_mask is not None: __snake_case : Optional[int] = decoder_attention_mask return inputs def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> BatchFeature: '''simple docstring''' __snake_case : Union[str, Any] = isinstance(UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) __snake_case : Any = is_batched_numpy or ( isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __snake_case : Dict = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ): __snake_case : Union[str, Any] = np.asarray(UpperCAmelCase , dtype=np.floataa ) elif isinstance(UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): __snake_case : List[Any] = speech.astype(np.floataa ) # always return batch if not is_batched: __snake_case : str = [speech] # needed to make pad() work on spectrogram inputs __snake_case : List[Any] = self.feature_size # convert into correct format for padding if is_target: __snake_case : Dict = [self._extract_mel_features(UpperCAmelCase ) for waveform in speech] __snake_case : Optional[Any] = BatchFeature({"input_values": features} ) __snake_case : Optional[Any] = self.num_mel_bins else: __snake_case : str = BatchFeature({"input_values": speech} ) __snake_case : List[Any] = self.pad( UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , ) __snake_case : Dict = 
feature_size_hack # convert input values to correct format __snake_case : Dict = padded_inputs["input_values"] if not isinstance(input_values[0] , np.ndarray ): __snake_case : Tuple = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): __snake_case : str = [array.astype(np.floataa ) for array in input_values] elif isinstance(UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): __snake_case : Optional[Any] = input_values.astype(np.floataa ) # convert attention_mask to correct format __snake_case : Any = padded_inputs.get("attention_mask" ) if attention_mask is not None: __snake_case : Union[str, Any] = [np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: __snake_case : Tuple = ( attention_mask if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) __snake_case : Optional[Any] = self.zero_mean_unit_var_norm( padded_inputs["input_values"] , attention_mask=UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: __snake_case : Optional[int] = padded_inputs.convert_to_tensors(UpperCAmelCase ) return padded_inputs def UpperCAmelCase ( self ) -> Dict[str, Any]: '''simple docstring''' __snake_case : Dict = super().to_dict() # Don't serialize these as they are derived from the other properties. __snake_case : List[str] = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"] for name in names: if name in output: del output[name] return output
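# --- Illustrative usage sketch. This class matches transformers' SpeechT5
# feature extractor; the class and checkpoint names below are assumptions
# ("microsoft/speecht5_tts" is the public TTS release).
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor.from_pretrained("microsoft/speecht5_tts")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
# `audio` yields raw waveform "input_values"; `audio_target` instead yields
# the log-mel spectrogram targets produced by the mel extraction above.
inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="pt")
print(inputs["input_values"].shape, targets["input_values"].shape)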
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) _UpperCamelCase = logging.getLogger(__name__) def lowerCAmelCase__( lowercase : str ) -> List[str]: __snake_case : int = git.Repo(search_parent_directories=lowercase ) __snake_case : Union[str, Any] = { "repo_id": str(lowercase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f: json.dump(lowercase , lowercase , indent=4 ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]: if params.n_gpu <= 0: __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = -1 __snake_case : Union[str, Any] = True __snake_case : Tuple = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : int = int(os.environ["N_GPU_NODE"] ) __snake_case : Union[str, Any] = int(os.environ["RANK"] ) # number of nodes / node ID __snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node __snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node __snake_case : Union[str, Any] = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __snake_case : Any = 1 __snake_case : str = 0 __snake_case : Optional[Any] = 0 __snake_case : Dict = 0 __snake_case : int = 1 __snake_case : Optional[Any] = 1 __snake_case : Tuple = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0 __snake_case : List[Any] = params.n_nodes > 1 # summary __snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
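# Minimal sketch of driving the seeding helper above (the last function, which
# reads `args.seed` and `args.n_gpu`). A SimpleNamespace stands in for the
# parsed argparse namespace; the attribute names follow the references in the
# function body, and `set_seed` is an assumed name for that helper.
from types import SimpleNamespace

import torch

args = SimpleNamespace(seed=42, n_gpu=1 if torch.cuda.is_available() else 0)
# set_seed(args)  # i.e. the final helper defined above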
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
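# Sanity check: both implementations above should agree up to floating-point
# error. A minimal sketch, using the function names restored above.
_a, _b = [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]
_expected = 27.0 ** 0.5  # sqrt(3^2 + 3^2 + 3^2)
assert abs(euclidean_distance(_a, _b) - _expected) < 1e-9
assert abs(euclidean_distance_no_np(_a, _b) - _expected) < 1e-9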
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : str =JukeboxTokenizer UpperCAmelCase_ : Tuple ={ "artist": "Zac Brown Band", "genres": "Country", "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ", } @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) __snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : Optional[Any] = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 
33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) __snake_case : Tuple = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : int = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 
77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class _lowerCamelCase ( datasets.BeamBasedBuilder ): """simple docstring""" def UpperCAmelCase ( self ) -> int: '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=UpperCAmelCase , ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(UpperCAmelCase ) class _lowerCamelCase ( datasets.BeamBasedBuilder ): """simple docstring""" def UpperCAmelCase ( self ) -> Any: '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=UpperCAmelCase , ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(UpperCAmelCase ) def lowerCAmelCase__( ) -> Optional[int]: return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def lowerCAmelCase__( ) -> Tuple: return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class _lowerCamelCase ( a ): """simple docstring""" @require_beam def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[Any] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __snake_case : Tuple = DummyBeamDataset(cache_dir=UpperCAmelCase , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(UpperCAmelCase , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) __snake_case : Any = builder.as_dataset() self.assertEqual(dset["train"].num_rows , UpperCAmelCase ) self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCAmelCase ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def UpperCAmelCase ( self ) -> int: '''simple docstring''' import apache_beam as beam __snake_case : Optional[int] = beam.io.parquetio.WriteToParquet __snake_case : str = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __snake_case : str = DummyBeamDataset(cache_dir=UpperCAmelCase , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: __snake_case : Dict = partial(UpperCAmelCase , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( UpperCAmelCase , 
builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertTrue( os.path.exists( os.path.join( UpperCAmelCase , builder.name , "default" , "0.0.0" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) __snake_case : List[str] = builder.as_dataset() self.assertEqual(dset["train"].num_rows , UpperCAmelCase ) self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCAmelCase ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: __snake_case : Tuple = DummyBeamDataset(cache_dir=UpperCAmelCase ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Optional[int] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __snake_case : Dict = NestedBeamDataset(cache_dir=UpperCAmelCase , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(UpperCAmelCase , builder.name , "default" , "0.0.0" , F"""{builder.name}-train.arrow""" ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) __snake_case : Any = builder.as_dataset() self.assertEqual(dset["train"].num_rows , UpperCAmelCase ) self.assertEqual(dset["train"].info.splits["train"].num_examples , UpperCAmelCase ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : str UpperCAmelCase_ : str =None @staticmethod def UpperCAmelCase ( ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if not self.is_available(): raise RuntimeError( F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def UpperCAmelCase ( cls ) -> Tuple: '''simple docstring''' return F"""`pip install {cls.pip_package or cls.name}`""" class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[int] ="optuna" @staticmethod def UpperCAmelCase ( ) -> Union[str, Any]: '''simple docstring''' return is_optuna_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict: '''simple docstring''' return run_hp_search_optuna(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' return default_hp_space_optuna(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : List[str] ="ray" UpperCAmelCase_ : Dict ="'ray[tune]'" @staticmethod def UpperCAmelCase ( ) -> str: '''simple docstring''' return is_ray_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' return run_hp_search_ray(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' return default_hp_space_ray(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Tuple ="sigopt" @staticmethod def UpperCAmelCase ( ) -> int: '''simple docstring''' return is_sigopt_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return run_hp_search_sigopt(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict: '''simple docstring''' return default_hp_space_sigopt(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : str ="wandb" @staticmethod def UpperCAmelCase ( ) -> Optional[Any]: '''simple docstring''' return is_wandb_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return run_hp_search_wandb(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return default_hp_space_wandb(UpperCAmelCase ) _UpperCamelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, 
SigOptBackend, WandbBackend] } def lowerCAmelCase__( ) -> str: __snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowercase ) > 0: __snake_case : Dict = available_backends[0].name if len(lowercase ) > 1: logger.info( f"""{len(lowercase )} hyperparameter search backends available. Using {name} as the default.""" ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
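# Sketch of how the backend registry above is typically consumed: resolve the
# default backend name, then instantiate and sanity-check it. The identifiers
# follow the public transformers API; in this excerpt they appear under the
# obfuscated names above. Commented out since it needs at least one of
# optuna / ray[tune] / sigopt / wandb installed.
#
#   name = default_hp_search_backend()  # the module-level resolver above
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]
#   backend_cls().ensure_available()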
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''nielsr/canine-s''': 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCamelCase = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCamelCase = 0 _UpperCamelCase = 0xE0_00 _UpperCamelCase = 0xE0_01 _UpperCamelCase = 0xE0_02 _UpperCamelCase = 0xE0_03 _UpperCamelCase = 0xE0_04 # Maps special codepoints to human-readable names. _UpperCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _UpperCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase=chr(UpperCAmelCase ) , UpperCAmelCase=chr(UpperCAmelCase ) , UpperCAmelCase=chr(UpperCAmelCase ) , UpperCAmelCase=chr(UpperCAmelCase ) , UpperCAmelCase=chr(UpperCAmelCase ) , UpperCAmelCase=chr(UpperCAmelCase ) , UpperCAmelCase=False , UpperCAmelCase=2048 , **UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token __snake_case : str = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token __snake_case : int = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token __snake_case : List[str] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token __snake_case : str = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __snake_case : int = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , model_max_length=UpperCAmelCase , **UpperCAmelCase , ) # Creates a mapping for looking up the IDs of special symbols. __snake_case : Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): __snake_case : Any = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
__snake_case : Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } __snake_case : Optional[int] = UNICODE_VOCAB_SIZE __snake_case : int = len(self._special_codepoints ) @property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return self._unicode_vocab_size def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return list(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' try: return ord(UpperCAmelCase ) except TypeError: raise ValueError(F"""invalid token: '{token}'""" ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCAmelCase ) except TypeError: raise ValueError(F"""invalid id: {index}""" ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple: '''simple docstring''' return "".join(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : Any = [self.cls_token_id] __snake_case : List[str] = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) __snake_case : List[Any] = [1] + ([0] * len(UpperCAmelCase )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCAmelCase )) + [1] return result def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __snake_case : str = [self.sep_token_id] __snake_case : str = [self.cls_token_id] __snake_case : Optional[int] = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[Any]: '''simple docstring''' return ()
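# Hedged usage sketch for the character-level tokenizer above: CANINE maps each
# character straight to its Unicode codepoint, so `ord`/`chr` round-trip through
# the vocabulary. `google/canine-s` is the public checkpoint for this tokenizer.
#
#   from transformers import CanineTokenizer
#
#   tok = CanineTokenizer.from_pretrained("google/canine-s")
#   ids = tok("hello")["input_ids"]  # [CLS], h, e, l, l, o, [SEP] as codepoints
#   assert ids[1] == ord("h")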
import math def lowerCAmelCase__( lowercase : list , lowercase : int = 0 , lowercase : int = 0 ) -> list: __snake_case : Any = end or len(lowercase ) for i in range(lowercase , lowercase ): __snake_case : List[str] = i __snake_case : Union[str, Any] = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __snake_case : Optional[Any] = array[temp_index - 1] temp_index -= 1 __snake_case : Any = temp_index_value return array def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int ) -> None: # Max Heap __snake_case : Any = index __snake_case : Optional[Any] = 2 * index + 1 # Left Node __snake_case : str = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __snake_case : Optional[int] = left_index if right_index < heap_size and array[largest] < array[right_index]: __snake_case : Tuple = right_index if largest != index: __snake_case , __snake_case : int = array[largest], array[index] heapify(lowercase , lowercase , lowercase ) def lowerCAmelCase__( lowercase : list ) -> list: __snake_case : List[str] = len(lowercase ) for i in range(n // 2 , -1 , -1 ): heapify(lowercase , lowercase , lowercase ) for i in range(n - 1 , 0 , -1 ): __snake_case , __snake_case : Optional[Any] = array[0], array[i] heapify(lowercase , 0 , lowercase ) return array def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int ) -> int: if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int ) -> int: __snake_case : Union[str, Any] = low __snake_case : Union[str, Any] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __snake_case , __snake_case : str = array[j], array[i] i += 1 def lowerCAmelCase__( lowercase : list ) -> list: if len(lowercase ) == 0: return array __snake_case : Union[str, Any] = 2 * math.ceil(math.loga(len(lowercase ) ) ) __snake_case : Dict = 16 return intro_sort(lowercase , 0 , len(lowercase ) , lowercase , lowercase ) def lowerCAmelCase__( lowercase : list , lowercase : int , lowercase : int , lowercase : int , lowercase : int ) -> list: while end - start > size_threshold: if max_depth == 0: return heap_sort(lowercase ) max_depth -= 1 __snake_case : List[str] = median_of_a(lowercase , lowercase , start + ((end - start) // 2) + 1 , end - 1 ) __snake_case : Optional[Any] = partition(lowercase , lowercase , lowercase , lowercase ) intro_sort(lowercase , lowercase , lowercase , lowercase , lowercase ) __snake_case : List[str] = p return insertion_sort(lowercase , lowercase , lowercase ) if __name__ == "__main__": import doctest doctest.testmod() _UpperCamelCase = input('''Enter numbers separated by a comma : ''').strip() _UpperCamelCase = [float(item) for item in user_input.split(''',''')] print(sort(unsorted))
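# Standalone sanity check for the median-of-three pivot rule used above: picking
# the median of the first, middle, and last elements guards quicksort against
# degenerate (already-sorted) input. Names here are local to this sketch, which
# works on values rather than indices.
def median_of_three(a: float, b: float, c: float) -> float:
    return sorted([a, b, c])[1]


assert median_of_three(3, 9, 1) == 3
assert median_of_three(1, 2, 3) == 2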
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def lowerCAmelCase__( lowercase : List[str]=None ) -> Any: __snake_case : Optional[int] = argparse.ArgumentParser(add_help=lowercase , allow_abbrev=lowercase ) # The main config parser __snake_case : Union[str, Any] = config_command_parser(lowercase ) # The subparser to add commands to __snake_case : Any = config_parser.add_subparsers(title="subcommands" , dest="subcommand" ) # Then add other parsers with the parent parser default_command_parser(lowercase , parents=[parent_parser] ) update_command_parser(lowercase , parents=[parent_parser] ) return config_parser def lowerCAmelCase__( ) -> Any: __snake_case : Dict = get_config_parser() __snake_case : Any = config_parser.parse_args() if not hasattr(lowercase , "func" ): config_parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
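# Typical invocations once this module backs the `accelerate config` entry
# point (the subcommand names follow the parsers registered above):
#
#   $ accelerate config           # interactive questionnaire via the main parser
#   $ accelerate config default   # default_command_parser writes a default file
#   $ accelerate config update    # update_command_parser migrates an old file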
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def lowerCAmelCase__( lowercase : Dict ) -> str: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def lowerCAmelCase__( ) -> List[Any]: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case : Any = [1, 2, 3] with pytest.raises(lowercase ): with parallel_backend("unsupported backend" ): map_nested(lowercase , lowercase , num_proc=2 ) with pytest.raises(lowercase ): with parallel_backend("unsupported backend" ): map_nested(lowercase , lowercase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def lowerCAmelCase__( lowercase : Dict ) -> Dict: __snake_case : Any = [1, 2] __snake_case : Dict = {"a": 1, "b": 2} __snake_case : Optional[int] = {"a": [1, 2], "b": [3, 4]} __snake_case : int = {"a": {"1": 1}, "b": 2} __snake_case : str = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case : Dict = [2, 3] __snake_case : Tuple = {"a": 2, "b": 3} __snake_case : int = {"a": [2, 3], "b": [4, 5]} __snake_case : Dict = {"a": {"1": 2}, "b": 3} __snake_case : str = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
from math import sqrt def lowerCAmelCase__( lowercase : int ) -> bool: assert isinstance(lowercase , lowercase ) and ( number >= 0 ), "'number' must been an int and positive" __snake_case : List[Any] = True # 0 and 1 are none primes. if number <= 1: __snake_case : Optional[int] = False for divisor in range(2 , int(round(sqrt(lowercase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __snake_case : List[Any] = False break # precondition assert isinstance(lowercase , lowercase ), "'status' must been from type bool" return status def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: assert isinstance(lowercase , lowercase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __snake_case : int = list(range(2 , n + 1 ) ) __snake_case : List[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowercase ) ): for j in range(i + 1 , len(lowercase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __snake_case : Optional[int] = 0 # filters actual prime numbers. __snake_case : Optional[int] = [x for x in begin_list if x != 0] # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type list" return ans def lowerCAmelCase__( lowercase : Optional[int] ) -> Dict: assert isinstance(lowercase , lowercase ) and (n > 2), "'N' must been an int and > 2" __snake_case : Union[str, Any] = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(lowercase ): ans.append(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type list" return ans def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: assert isinstance(lowercase , lowercase ) and number >= 0, "'number' must been an int and >= 0" __snake_case : str = [] # this list will be returns of the function. # potential prime number factors. 
__snake_case : Any = 2 __snake_case : List[str] = number if number == 0 or number == 1: ans.append(lowercase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowercase ): while quotient != 1: if is_prime(lowercase ) and (quotient % factor == 0): ans.append(lowercase ) quotient /= factor else: factor += 1 else: ans.append(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type list" return ans def lowerCAmelCase__( lowercase : Dict ) -> str: assert isinstance(lowercase , lowercase ) and ( number >= 0 ), "'number' bust been an int and >= 0" __snake_case : Optional[int] = 0 # prime factorization of 'number' __snake_case : List[Any] = prime_factorization(lowercase ) __snake_case : List[Any] = max(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type int" return ans def lowerCAmelCase__( lowercase : int ) -> int: assert isinstance(lowercase , lowercase ) and ( number >= 0 ), "'number' bust been an int and >= 0" __snake_case : Union[str, Any] = 0 # prime factorization of 'number' __snake_case : List[Any] = prime_factorization(lowercase ) __snake_case : Union[str, Any] = min(lowercase ) # precondition assert isinstance(lowercase , lowercase ), "'ans' must been from type int" return ans def lowerCAmelCase__( lowercase : int ) -> int: assert isinstance(lowercase , lowercase ), "'number' must been an int" assert isinstance(number % 2 == 0 , lowercase ), "compare bust been from type bool" return number % 2 == 0 def lowerCAmelCase__( lowercase : Optional[Any] ) -> List[str]: assert isinstance(lowercase , lowercase ), "'number' must been an int" assert isinstance(number % 2 != 0 , lowercase ), "compare bust been from type bool" return number % 2 != 0 def lowerCAmelCase__( lowercase : Dict ) -> Any: assert ( isinstance(lowercase , lowercase ) and (number > 2) and is_even(lowercase ) ), "'number' must been an int, even and > 2" __snake_case : List[Any] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __snake_case : Dict = get_prime_numbers(lowercase ) __snake_case : Dict = len(lowercase ) # run variable for while-loops. __snake_case : Any = 0 __snake_case : str = None # exit variable. for break up the loops __snake_case : Optional[Any] = True while i < len_pn and loop: __snake_case : List[Any] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __snake_case : List[str] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowercase , lowercase ) and (len(lowercase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def lowerCAmelCase__( lowercase : List[str] , lowercase : int ) -> Optional[Any]: assert ( isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
__snake_case : List[str] = 0 while numbera != 0: __snake_case : str = numbera % numbera __snake_case : Tuple = numbera __snake_case : Union[str, Any] = rest # precondition assert isinstance(lowercase , lowercase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCAmelCase__( lowercase : str , lowercase : Dict ) -> int: assert ( isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __snake_case : str = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __snake_case : List[Any] = prime_factorization(lowercase ) __snake_case : int = prime_factorization(lowercase ) elif numbera == 1 or numbera == 1: __snake_case : int = [] __snake_case : str = [] __snake_case : Dict = max(lowercase , lowercase ) __snake_case : Optional[Any] = 0 __snake_case : List[Any] = 0 __snake_case : int = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __snake_case : Optional[int] = prime_fac_a.count(lowercase ) __snake_case : Tuple = prime_fac_a.count(lowercase ) for _ in range(max(lowercase , lowercase ) ): ans *= n else: __snake_case : List[str] = prime_fac_a.count(lowercase ) for _ in range(lowercase ): ans *= n done.append(lowercase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __snake_case : Tuple = prime_fac_a.count(lowercase ) for _ in range(lowercase ): ans *= n done.append(lowercase ) # precondition assert isinstance(lowercase , lowercase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCAmelCase__( lowercase : List[Any] ) -> Tuple: assert isinstance(lowercase , lowercase ) and (n >= 0), "'number' must been a positive int" __snake_case : Union[str, Any] = 0 __snake_case : Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowercase ): ans += 1 # precondition assert isinstance(lowercase , lowercase ) and is_prime( lowercase ), "'ans' must been a prime number and from type int" return ans def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : Optional[int] ) -> Dict: assert ( is_prime(lowercase ) and is_prime(lowercase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __snake_case : Any = p_number_a + 1 # jump to the next number __snake_case : Optional[Any] = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowercase ): number += 1 while number < p_number_a: ans.append(lowercase ) number += 1 # fetch the next prime number. while not is_prime(lowercase ): number += 1 # precondition assert ( isinstance(lowercase , lowercase ) and ans[0] != p_number_a and ans[len(lowercase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCAmelCase__( lowercase : int ) -> Tuple: assert isinstance(lowercase , lowercase ) and (n >= 1), "'n' must been int and >= 1" __snake_case : Dict = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(lowercase ) # precondition assert ans[0] == 1 and ans[len(lowercase ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCAmelCase__( lowercase : Optional[Any] ) -> Tuple: assert isinstance(lowercase , lowercase ) and ( number > 1 ), "'number' must been an int and >= 1" __snake_case : List[Any] = get_divisors(lowercase ) # precondition assert ( isinstance(lowercase , lowercase ) and (divisors[0] == 1) and (divisors[len(lowercase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCAmelCase__( lowercase : Dict , lowercase : Dict ) -> Optional[Any]: assert ( isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __snake_case : int = gcd(abs(lowercase ) , abs(lowercase ) ) # precondition assert ( isinstance(lowercase , lowercase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCAmelCase__( lowercase : int ) -> Optional[Any]: assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been a int and >= 0" __snake_case : Optional[int] = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def lowerCAmelCase__( lowercase : str ) -> Any: assert isinstance(lowercase , lowercase ) and (n >= 0), "'n' must been an int and >= 0" __snake_case : Union[str, Any] = 0 __snake_case : int = 1 __snake_case : Union[str, Any] = 1 # this will be return for _ in range(n - 1 ): __snake_case : Union[str, Any] = ans ans += fiba __snake_case : Optional[int] = tmp return ans
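# Quick standalone check of the Goldbach pair search implemented above (every
# even number > 2 as a sum of two primes); names here are local to this sketch.
def _is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))


def _goldbach(n: int) -> tuple[int, int]:
    # mirrors the two-prime pair search in the function above
    for p in range(2, n):
        if _is_prime(p) and _is_prime(n - p):
            return (p, n - p)
    raise ValueError("no decomposition found")


assert sum(_goldbach(28)) == 28  # e.g. (5, 23)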
import math import random from typing import Any from .hill_climbing import SearchProblem def lowerCAmelCase__( lowercase : Dict , lowercase : bool = True , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : bool = False , lowercase : float = 100 , lowercase : float = 0.0_1 , lowercase : float = 1 , ) -> Any: __snake_case : Optional[Any] = False __snake_case : Optional[Any] = search_prob __snake_case : str = start_temperate __snake_case : List[Any] = [] __snake_case : str = 0 __snake_case : Dict = None while not search_end: __snake_case : List[Any] = current_state.score() if best_state is None or current_score > best_state.score(): __snake_case : List[Any] = current_state scores.append(lowercase ) iterations += 1 __snake_case : Dict = None __snake_case : str = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __snake_case : Any = random.randint(0 , len(lowercase ) - 1 ) # picking a random neighbor __snake_case : int = neighbors.pop(lowercase ) __snake_case : Optional[Any] = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __snake_case : Any = change * -1 # in case we are finding minimum if change > 0: # improves the solution __snake_case : List[str] = picked_neighbor else: __snake_case : Optional[Any] = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __snake_case : str = picked_neighbor __snake_case : Optional[Any] = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __snake_case : Optional[Any] = True else: __snake_case : str = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(lowercase ) , lowercase ) plt.xlabel("Iterations" ) plt.ylabel("Function values" ) plt.show() return best_state if __name__ == "__main__": def lowerCAmelCase__( lowercase : List[str] , lowercase : Tuple ) -> str: return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) _UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) _UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) def lowerCAmelCase__( lowercase : Any , lowercase : Union[str, Any] ) -> Any: return (3 * x**2) - (6 * y) _UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing(prob, find_max=False, visualization=True) print( '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' F'''{local_min.score()}''' ) 
_UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing(prob, find_max=True, visualization=True) print( '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' F'''{local_min.score()}''' )
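# The acceptance rule above is the classic Metropolis criterion: a worsening
# move of size `change` (non-positive here) at temperature T is accepted with
# probability e^(change / T), so bad moves become rarer as the temperature
# decays. A minimal standalone restatement:
import math
import random


def accept_worse(change: float, temperature: float) -> bool:
    return random.random() < math.e ** (change / temperature)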
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
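# Why the stack version wins: each element is pushed and popped at most once,
# so the scan is O(n), versus O(n^2) worst case for the two nested-loop variants.
assert next_greatest_element([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]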
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"] UpperCAmelCase_ : Tuple ="FlavaImageProcessor" UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast") def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) __snake_case : List[Any] = kwargs.pop("feature_extractor" ) __snake_case : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = self.image_processor def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." 
) if text is not None: __snake_case : Union[str, Any] = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if images is not None: __snake_case : Union[str, Any] = self.image_processor( UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if text is not None and images is not None: encoding.update(UpperCAmelCase ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[Any] = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
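# Hedged usage sketch for the joint processor above; `facebook/flava-full` is
# the public FLAVA checkpoint.
#
#   from PIL import Image
#   from transformers import FlavaProcessor
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(
#       text=["a photo of a cat"], images=Image.open("cat.jpg"), return_tensors="pt"
#   )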
class Graph:
    """Directed graph stored as an adjacency list."""

    def __init__(self) -> None:
        self.vertex: dict[int, list[int]] = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list[bool]) -> None:
        # mark the current vertex as visited and print it
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
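# The recursive walk above can hit Python's recursion limit on deep graphs; an
# equivalent iterative sketch using an explicit stack:
def dfs_iterative(graph: dict[int, list[int]], start: int) -> list[int]:
    visited: set[int] = set()
    stack, order = [start], []
    while stack:
        v = stack.pop()
        if v not in visited:
            visited.add(v)
            order.append(v)
            # reversed so neighbors are visited in insertion order
            stack.extend(reversed(graph.get(v, [])))
    return order


assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]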
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _UpperCamelCase = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', } } _UpperCamelCase = { '''camembert-base''': 512, } _UpperCamelCase = '''▁''' class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =VOCAB_FILES_NAMES UpperCAmelCase_ : str =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : str =["input_ids", "attention_mask"] def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: '''simple docstring''' __snake_case : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token __snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) __snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase ) ) __snake_case : Dict = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __snake_case : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __snake_case : Optional[int] = len(self.fairseq_tokens_to_ids ) __snake_case : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __snake_case : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Dict = [self.cls_token_id] __snake_case : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1] def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return 
len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(UpperCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = [] __snake_case : Union[str, Any] = "" __snake_case : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase ) + token __snake_case : List[Any] = True __snake_case : Union[str, Any] = [] else: current_sub_tokens.append(UpperCAmelCase ) __snake_case : int = False out_string += self.sp_model.decode(UpperCAmelCase ) return out_string.strip() def __getstate__( self ) -> List[Any]: '''simple docstring''' __snake_case : str = self.__dict__.copy() __snake_case : Optional[Any] = None return state def __setstate__( self , UpperCAmelCase ) -> str: '''simple docstring''' __snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case : List[str] = {} __snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __snake_case : Optional[Any] = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , "wb" ) as fi: __snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,)
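# Hedged sketch (not in the original file) of the special-token layout that
# `build_inputs_with_special_tokens` above produces, using stand-in ids (the
# real ids come from the SentencePiece vocabulary, which is not bundled here):
def camembert_layout(
    token_ids_a: list[int],
    token_ids_b: list[int] | None = None,
    cls_id: int = 5,
    sep_id: int = 6,
) -> list[int]:
    # single sequence:   <s> A </s>
    # pair of sequences: <s> A </s></s> B </s>
    if token_ids_b is None:
        return [cls_id] + token_ids_a + [sep_id]
    return [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]


assert camembert_layout([10, 11]) == [5, 10, 11, 6]
assert camembert_layout([10], [20]) == [5, 10, 6, 6, 20, 6]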
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the given strings with ``separator`` between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
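# Hedged usage sketch (not in the original file): the hand-rolled join above
# should agree with the built-in ``str.join`` for simple separators.
assert join(",", ["a", "b", "c"]) == ",".join(["a", "b", "c"]) == "a,b,c"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"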
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
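# Hedged worked example (not in the original file): with arr = [3, 34, 4, 12, 5, 2]
# a subset summing to 9 exists (4 + 5), but no subset sums to 30.
assert is_subset_sum([3, 34, 4, 12, 5, 2], 9) is True
assert is_subset_sum([3, 34, 4, 12, 5, 2], 30) is False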
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """Return the fixed monthly payment for a loan repaid over ``years_to_repay``."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
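# Hedged worked example (not in the original file): 25,000 borrowed at 8% per
# annum over 10 years gives roughly 303.32 per month, i.e.
# EMI = P * r * (1 + r)**n / ((1 + r)**n - 1) with r = 0.08 / 12 and n = 120.
emi = equated_monthly_installments(25_000, 0.08, 10)
assert abs(emi - 303.32) < 0.05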
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node _UpperCamelCase = 4 _UpperCamelCase = 3 class _lowerCamelCase ( a ): """simple docstring""" pass def lowerCAmelCase__( lowercase : List[str] ) -> Any: for shard in shards: for i in range(lowercase ): yield {"i": i, "shard": shard} def lowerCAmelCase__( ) -> Optional[int]: __snake_case : List[Any] = int(os.environ["RANK"] ) __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : List[str] = ArgumentParser() parser.add_argument("--streaming" , type=lowercase ) parser.add_argument("--local_rank" , type=lowercase ) parser.add_argument("--num_workers" , type=lowercase , default=0 ) __snake_case : Any = parser.parse_args() __snake_case : Dict = args.streaming __snake_case : Union[str, Any] = args.num_workers __snake_case : Any = {"shards": [f"""shard_{shard_idx}""" for shard_idx in range(lowercase )]} __snake_case : Optional[int] = IterableDataset.from_generator(lowercase , gen_kwargs=lowercase ) if not streaming: __snake_case : Any = Dataset.from_list(list(lowercase ) ) __snake_case : Dict = split_dataset_by_node(lowercase , rank=lowercase , world_size=lowercase ) __snake_case : Union[str, Any] = torch.utils.data.DataLoader(lowercase , num_workers=lowercase ) __snake_case : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD __snake_case : List[str] = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) __snake_case : Dict = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
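# Hedged single-process sketch (not in the original file) of the API the
# distributed test above exercises: `split_dataset_by_node` shards a dataset
# across ranks so that the shards together cover it exactly once.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(8))})
shard_rank0 = split_dataset_by_node(ds, rank=0, world_size=2)
shard_rank1 = split_dataset_by_node(ds, rank=1, world_size=2)
assert len(shard_rank0) + len(shard_rank1) == len(ds)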
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=99 , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=9 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase=8 , UpperCAmelCase=0.1 , UpperCAmelCase=0.002 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = parent __snake_case : str = batch_size __snake_case : Tuple = encoder_seq_length __snake_case : Dict = decoder_seq_length # For common tests __snake_case : Dict = self.decoder_seq_length __snake_case : Optional[int] = is_training __snake_case : int = use_attention_mask __snake_case : Union[str, Any] = use_labels __snake_case : str = vocab_size __snake_case : int = hidden_size __snake_case : str = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : List[Any] = d_ff __snake_case : Optional[int] = relative_attention_num_buckets __snake_case : List[Any] = dropout_rate __snake_case : Tuple = initializer_factor __snake_case : int = eos_token_id __snake_case : Optional[int] = pad_token_id __snake_case : str = decoder_start_token_id __snake_case : Dict = None __snake_case : Union[str, Any] = decoder_layers def UpperCAmelCase ( self ) -> Any: '''simple docstring''' return TaConfig.from_pretrained("google/umt5-base" ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: __snake_case : int = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __snake_case : List[Any] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __snake_case : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase ) if decoder_head_mask is None: __snake_case : int = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase ) if cross_attn_head_mask is None: __snake_case : List[Any] = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __snake_case : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that 
# all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __snake_case : Any = input_ids.clamp(self.pad_token_id + 1 ) __snake_case : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 ) __snake_case : Optional[Any] = self.get_config() __snake_case : Tuple = config.num_attention_heads __snake_case : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, input_dict def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case , __snake_case : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = UMTaModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : Tuple = model( input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , ) __snake_case : Union[str, Any] = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase ) __snake_case : Any = result.last_hidden_state __snake_case : str = result.past_key_values __snake_case : List[Any] = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = 
UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval() # first forward pass __snake_case : Any = model(UpperCAmelCase , use_cache=UpperCAmelCase ) __snake_case : str = model(UpperCAmelCase ) __snake_case : List[Any] = model(UpperCAmelCase , use_cache=UpperCAmelCase ) self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) ) self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 ) __snake_case , __snake_case : Optional[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __snake_case : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __snake_case : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case : Tuple = model(UpperCAmelCase )["last_hidden_state"] __snake_case : Optional[int] = model(UpperCAmelCase , past_key_values=UpperCAmelCase )["last_hidden_state"] # select random slice __snake_case : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case : Any = output_from_no_past[:, -1, random_slice_idx].detach() __snake_case : str = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , ) -> List[str]: '''simple docstring''' __snake_case : str = UMTaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).half().eval() __snake_case : int = model(**UpperCAmelCase )["last_hidden_state"] self.parent.assertFalse(torch.isnan(UpperCAmelCase ).any().item() ) @require_torch class _lowerCamelCase ( a , a , a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Any =( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) UpperCAmelCase_ : List[Any] =(UMTaForConditionalGeneration,) if is_torch_available() else () UpperCAmelCase_ : int =( { "conversational": UMTaForConditionalGeneration, "feature-extraction": UMTaModel, "summarization": UMTaForConditionalGeneration, "text2text-generation": UMTaForConditionalGeneration, "translation": UMTaForConditionalGeneration, "question-answering": UMTaForQuestionAnswering, } if is_torch_available() else {} ) UpperCAmelCase_ : int =True UpperCAmelCase_ : Optional[int] =False UpperCAmelCase_ : int =False UpperCAmelCase_ : Optional[int] =True UpperCAmelCase_ : int =True # The small UMT5 model needs higher percentages for CPU/MP tests UpperCAmelCase_ : List[str] =[0.8, 0.9] def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : str = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs() __snake_case : str = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' 
__snake_case : Union[str, Any] = ["encoder_attentions", "decoder_attentions", "cross_attentions"] __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() __snake_case : Any = config_and_inputs[0] __snake_case : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase ).eval() model.to(UpperCAmelCase ) __snake_case : Union[str, Any] = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ), } for attn_name, (name, mask) in zip(UpperCAmelCase , head_masking.items() ): __snake_case : Tuple = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __snake_case : Dict = torch.ones( config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ) __snake_case : List[Any] = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , **UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step __snake_case : Tuple = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." ) def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Union[str, Any] = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=UpperCAmelCase ).to(UpperCAmelCase ) __snake_case : int = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=UpperCAmelCase , legacy=UpperCAmelCase ) __snake_case : Union[str, Any] = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] __snake_case : List[Any] = tokenizer(UpperCAmelCase , return_tensors="pt" , padding=UpperCAmelCase ).input_ids # fmt: off __snake_case : List[str] = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(UpperCAmelCase , UpperCAmelCase ) __snake_case : str = model.generate(input_ids.to(UpperCAmelCase ) ) __snake_case : List[str] = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] __snake_case : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase ) self.assertEqual(UpperCAmelCase , UpperCAmelCase )
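# Hedged usage sketch (not in the original file; downloads google/umt5-small
# from the Hub, which the slow tests above guard behind @slow):
#
#     from transformers import AutoTokenizer, UMT5ForConditionalGeneration
#     model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
#     tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
#     inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
#     outputs = model.generate(**inputs)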
def solution(limit: int = 1_000_000) -> int:
    """Count how many n below ``limit`` have exactly ten solutions to
    x**2 - y**2 - z**2 = n, where x, y, z are a decreasing arithmetic
    progression of positive integers (Project Euler 135)."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
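# Hedged brute-force cross-check (not in the original file): writing the
# progression as x = a + d, y = a, z = a - d gives n = a * (4d - a) with
# d < a < 4d, so solutions can also be counted directly for a small limit.
def brute_force_frequency(limit: int) -> list[int]:
    frequency = [0] * limit
    for a in range(1, limit):
        for d in range(a // 4 + 1, a):  # d < a < 4d
            n = a * (4 * d - a)
            if n >= limit:
                break
            frequency[n] += 1
    return frequency


# n = 1155 is the least value with exactly ten solutions (problem statement)
assert brute_force_frequency(1200)[1155] == 10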
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class _lowerCamelCase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Tuple: '''simple docstring''' super().__init__() __snake_case : int = pad_token_id __snake_case : List[Any] = max_length __snake_case : Optional[Any] = vocab __snake_case : Optional[int] = merges __snake_case : Optional[Any] = BytePairTokenizer(UpperCAmelCase , UpperCAmelCase , sequence_length=UpperCAmelCase ) @classmethod def UpperCAmelCase ( cls , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = [" ".join(UpperCAmelCase ) for m in tokenizer.bpe_ranks.keys()] __snake_case : Tuple = tokenizer.get_vocab() return cls(UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) @classmethod def UpperCAmelCase ( cls , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[int] = GPTaTokenizer.from_pretrained(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) return cls.from_tokenizer(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) @classmethod def UpperCAmelCase ( cls , UpperCAmelCase ) -> str: '''simple docstring''' return cls(**UpperCAmelCase ) def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[str]: '''simple docstring''' __snake_case : List[str] = self.tf_tokenizer(UpperCAmelCase ) __snake_case : Any = tf.ones_like(UpperCAmelCase ) if self.pad_token_id is not None: # pad the tokens up to max length __snake_case : str = max_length if max_length is not None else self.max_length if max_length is not None: __snake_case , __snake_case : str = pad_model_inputs( UpperCAmelCase , max_seq_length=UpperCAmelCase , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
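# Hedged usage sketch (not in the original file; assumes network access to the
# Hugging Face Hub and that the layer above ships as ``TFGPT2Tokenizer``):
#
#     import tensorflow as tf
#     tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2", max_length=16, pad_token_id=50256)
#     outputs = tf_tokenizer(tf.constant(["hello world"]))
#     outputs["input_ids"], outputs["attention_mask"]  # padded up to max_length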
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way ``target`` can be built by concatenating words from
    ``word_bank`` (words may be reused)."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
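# Hedged alternative sketch (not in the original file): the same count via a
# top-down memoised recursion, useful as a cross-check of the table version.
from functools import lru_cache


def count_construct(target: str, word_bank: tuple[str, ...]) -> int:
    @lru_cache(maxsize=None)
    def go(rest: str) -> int:
        if rest == "":
            return 1
        return sum(go(rest[len(w):]) for w in word_bank if rest.startswith(w))

    return go(target)


assert count_construct("abcdef", ("ab", "abc", "cd", "def", "abcd", "ef", "c")) == 4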
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : str , lowercase : str , lowercase : Path , lowercase : str = None , lowercase : str = None , lowercase : str = None , ) -> Optional[int]: if config_name_or_path is None: __snake_case : List[str] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: __snake_case : List[Any] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: __snake_case : Union[str, Any] = question_encoder_name_or_path __snake_case : int = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. __snake_case : Tuple = RagConfig.from_pretrained(lowercase ) __snake_case : int = AutoConfig.from_pretrained(lowercase ) __snake_case : str = AutoConfig.from_pretrained(lowercase ) __snake_case : Dict = gen_config __snake_case : Optional[Any] = question_encoder_config __snake_case : List[str] = model_class.from_pretrained_question_encoder_generator( lowercase , lowercase , config=lowercase ) rag_model.save_pretrained(lowercase ) # Sanity check. model_class.from_pretrained(lowercase ) # Save tokenizers. __snake_case : Tuple = AutoTokenizer.from_pretrained(lowercase ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) __snake_case : int = AutoTokenizer.from_pretrained(lowercase ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
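# Hedged invocation sketch (not in the original file; the model identifiers
# are illustrative RAG components, swap in your own checkpoints):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-sequence-consolidated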
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=56 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = parent __snake_case : Tuple = batch_size __snake_case : List[str] = seq_length __snake_case : Optional[int] = is_training __snake_case : int = use_attention_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : Any = use_labels __snake_case : List[str] = vocab_size __snake_case : int = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : List[Any] = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : Dict = initializer_range __snake_case : List[Any] = num_choices __snake_case : Union[str, Any] = rescale_embeddings __snake_case : List[Any] = attention_type __snake_case : str = use_bias __snake_case : Dict = block_size __snake_case : Optional[Any] = num_random_blocks def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_attention_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Union[str, Any] = None if self.use_token_type_ids: __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase ( self ) -> Any: '''simple 
docstring''' __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs __snake_case : int = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class _lowerCamelCase ( a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) UpperCAmelCase_ : Dict =False UpperCAmelCase_ : str =False def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Dict = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Any: '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_hidden_states_output() @slow def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __snake_case : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = model_class(UpperCAmelCase ) @jax.jit def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) with self.subTest("JIT Enabled" ): __snake_case : int = model_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __snake_case : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> int: '''simple docstring''' if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
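# Hedged usage sketch (not in the original file; downloads the checkpoint the
# slow test above loads):
#
#     from transformers import FlaxBigBirdModel
#     model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base")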
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = { '''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''], '''convert_funnel_original_tf_checkpoint_to_pytorch''': [], '''tokenization_funnel''': ['''FunnelTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''FunnelTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FunnelBaseModel''', '''FunnelForMaskedLM''', '''FunnelForMultipleChoice''', '''FunnelForPreTraining''', '''FunnelForQuestionAnswering''', '''FunnelForSequenceClassification''', '''FunnelForTokenClassification''', '''FunnelModel''', '''FunnelPreTrainedModel''', '''load_tf_weights_in_funnel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFFunnelBaseModel''', '''TFFunnelForMaskedLM''', '''TFFunnelForMultipleChoice''', '''TFFunnelForPreTraining''', '''TFFunnelForQuestionAnswering''', '''TFFunnelForSequenceClassification''', '''TFFunnelForTokenClassification''', '''TFFunnelModel''', '''TFFunnelPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return which day of the week a date in ``mm-dd-yyyy`` or ``mm/dd/yyyy``
    format falls on, using Zeller's congruence."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
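# Hedged worked example (not in the original file): for 01-01-2000 the month is
# shifted to m = 13 of 1999, so c = 19, k = 99, t = int(2.6 * 13 - 5.39) = 28,
# u = 4, v = 24, x = 1 + 99 = 100, z = 156, w = 156 - 38 = 118 and
# 118 % 7 = 6 -> Saturday.
assert zeller("01-01-2000") == "Your date 01-01-2000, is a Saturday!"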
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _UpperCamelCase = logging.getLogger(__name__) @dataclass class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : str =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase_ : Optional[str] =field( default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase_ : Optional[str] =field( default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase_ : Optional[str] =field( default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) UpperCAmelCase_ : bool =field(default=a , metadata={"help": "Whether tp freeze the encoder."} ) UpperCAmelCase_ : bool =field(default=a , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : str =field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase_ : Optional[str] =field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) UpperCAmelCase_ : Optional[int] =field( default=1_024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase_ : Optional[int] =field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase_ : Optional[int] =field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) UpperCAmelCase_ : Optional[int] =field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase_ : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase_ : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase_ : Optional[int] =field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) UpperCAmelCase_ : Optional[str] =field(default=a , metadata={"help": "Source language id for translation."} ) UpperCAmelCase_ : Optional[str] =field(default=a , metadata={"help": "Target language id for translation."} ) UpperCAmelCase_ : Optional[int] =field(default=a , metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase_ : bool =field( default=a , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def lowerCAmelCase__( lowercase : Optional[int] , lowercase : List[Any] , lowercase : int ) -> Optional[Any]: logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(lowercase , os.path.join(lowercase , f"""{split}_results.json""" ) ) def lowerCAmelCase__( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case : Any = parser.parse_args_into_dataclasses() check_output_dir(lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , lowercase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case : Union[str, Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(lowercase , lowercase , lowercase ): assert hasattr(lowercase , lowercase ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(lowercase , lowercase , getattr(lowercase , lowercase ) ) __snake_case : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=lowercase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __snake_case : Optional[Any] = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase , lowercase ): __snake_case : Optional[int] = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __snake_case : Dict = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __snake_case : Dict = SeqaSeqDataset # Get datasets __snake_case : Optional[int] = ( dataset_class( lowercase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) __snake_case : Tuple = ( dataset_class( lowercase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __snake_case : Tuple = ( dataset_class( lowercase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer __snake_case : List[Any] = ( build_compute_metrics_fn(data_args.task , lowercase ) if training_args.predict_with_generate else None ) __snake_case : Union[str, Any] = SeqaSeqTrainer( model=lowercase , args=lowercase , data_args=lowercase , train_dataset=lowercase , eval_dataset=lowercase , data_collator=SeqaSeqDataCollator( lowercase , lowercase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase , tokenizer=lowercase , ) __snake_case : Any = {} # Training if training_args.do_train: logger.info("*** Train ***" ) __snake_case : Optional[int] = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __snake_case : Dict = train_result.metrics 
__snake_case : str = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , lowercase , training_args.output_dir ) all_metrics.update(lowercase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) __snake_case : Optional[int] = trainer.evaluate(metric_key_prefix="val" ) __snake_case : Dict = data_args.n_val __snake_case : Tuple = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , lowercase , training_args.output_dir ) all_metrics.update(lowercase ) if training_args.do_predict: logger.info("*** Predict ***" ) __snake_case : int = trainer.predict(test_dataset=lowercase , metric_key_prefix="test" ) __snake_case : Dict = test_output.metrics __snake_case : Optional[Any] = data_args.n_test if trainer.is_world_process_zero(): __snake_case : Any = round(metrics["test_loss"] , 4 ) handle_metrics("test" , lowercase , training_args.output_dir ) all_metrics.update(lowercase ) if training_args.predict_with_generate: __snake_case : List[str] = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) __snake_case : str = lmap(str.strip , lowercase ) write_txt_file(lowercase , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(lowercase , os.path.join(training_args.output_dir , "all_results.json" ) ) return all_metrics def lowerCAmelCase__( lowercase : int ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
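# Hedged invocation sketch (not in the original file; the data directory and
# checkpoint name are placeholders for your own setup):
#
#     python finetune_trainer.py \
#         --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#         --data_dir ./cnn_dm \
#         --output_dir ./output \
#         --do_train --do_eval \
#         --predict_with_generate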
def combination_util(arr, n, r, index, data, i):
    """Recursively fill ``data`` and print every combination of size ``r``."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
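# Hedged cross-check (not in the original file): the recursion above
# enumerates the same subsets, in the same order, as ``itertools.combinations``.
from itertools import combinations

assert list(combinations([10, 20, 30], 2)) == [(10, 20), (10, 30), (20, 30)]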
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly inefficient) Roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer to a minimal Roman numeral string."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting every numeral in the input
    file in minimal form (Project Euler 89)."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
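# Hedged round-trip check (not in the original file): parsing an inefficient
# numeral and regenerating it yields the minimal form.
assert parse_roman_numerals("XXXXVIIII") == 49
assert generate_roman_numerals(49) == "XLIX"  # 9 characters shortened to 4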
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector weights are not converted
    for key in d:
        if "detector" in key:
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024, "num_labels": 2}
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
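# For reference, a hypothetical invocation of the converter above; the script
# filename and paths are illustrative, and the checkpoint filename must be one
# of ACCEPTABLE_CHECKPOINTS:
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       checkpoints/vqa_fine_tuned.th ./visual_bert_vqa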
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _UpperCamelCase = 8 def lowerCAmelCase__( lowercase : Any , lowercase : Tuple=BITS ) -> Tuple: __snake_case : int = x.device __snake_case : List[Any] = (x * 255).int().clamp(0 , 255 ) __snake_case : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowercase ) __snake_case : Tuple = rearrange(lowercase , "d -> d 1 1" ) __snake_case : Any = rearrange(lowercase , "b c h w -> b c 1 h w" ) __snake_case : str = ((x & mask) != 0).float() __snake_case : Optional[int] = rearrange(lowercase , "b c d h w -> b (c d) h w" ) __snake_case : Optional[Any] = bits * 2 - 1 return bits def lowerCAmelCase__( lowercase : List[str] , lowercase : Optional[Any]=BITS ) -> str: __snake_case : str = x.device __snake_case : Any = (x > 0).int() __snake_case : Tuple = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowercase , dtype=torch.intaa ) __snake_case : Optional[int] = rearrange(lowercase , "d -> d 1 1" ) __snake_case : Optional[Any] = rearrange(lowercase , "b (c d) h w -> b c d h w" , d=8 ) __snake_case : Tuple = reduce(x * mask , "b c d h w -> b c h w" , "sum" ) return (dec / 255).clamp(0.0 , 1.0 ) def lowerCAmelCase__( self : Union[str, Any] , lowercase : torch.FloatTensor , lowercase : int , lowercase : torch.FloatTensor , lowercase : float = 0.0 , lowercase : bool = True , lowercase : Tuple=None , lowercase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) __snake_case : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas __snake_case : Any = self.alphas_cumprod[timestep] __snake_case : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod __snake_case : Optional[int] = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __snake_case : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" __snake_case : Optional[int] = self.bit_scale if self.config.clip_sample: __snake_case : Union[str, Any] = torch.clamp(lowercase , -scale , lowercase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) __snake_case : List[str] = self._get_variance(lowercase , lowercase ) __snake_case : Tuple = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide __snake_case : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. 
compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __snake_case : Dict = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __snake_case : Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 __snake_case : List[Any] = model_output.device if torch.is_tensor(lowercase ) else "cpu" __snake_case : List[str] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowercase ).to(lowercase ) __snake_case : Tuple = self._get_variance(lowercase , lowercase ) ** 0.5 * eta * noise __snake_case : str = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase ) def lowerCAmelCase__( self : List[str] , lowercase : torch.FloatTensor , lowercase : int , lowercase : torch.FloatTensor , lowercase : Optional[Any]="epsilon" , lowercase : Optional[int]=None , lowercase : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]: __snake_case : List[Any] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: __snake_case , __snake_case : Any = torch.split(lowercase , sample.shape[1] , dim=1 ) else: __snake_case : Any = None # 1. compute alphas, betas __snake_case : str = self.alphas_cumprod[t] __snake_case : int = self.alphas_cumprod[t - 1] if t > 0 else self.one __snake_case : str = 1 - alpha_prod_t __snake_case : Tuple = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": __snake_case : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": __snake_case : List[Any] = model_output else: raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" ) # 3. Clip "predicted x_0" __snake_case : int = self.bit_scale if self.config.clip_sample: __snake_case : Optional[int] = torch.clamp(lowercase , -scale , lowercase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __snake_case : Dict = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t __snake_case : Union[str, Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __snake_case : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise __snake_case : int = 0 if t > 0: __snake_case : Dict = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowercase ).to(model_output.device ) __snake_case : Optional[int] = (self._get_variance(lowercase , predicted_variance=lowercase ) ** 0.5) * noise __snake_case : Tuple = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase ) class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , ) -> List[str]: '''simple docstring''' super().__init__() __snake_case : Optional[int] = bit_scale __snake_case : Tuple = ( ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step ) self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]: '''simple docstring''' __snake_case : Optional[int] = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , ) __snake_case : List[Any] = decimal_to_bits(UpperCAmelCase ) * self.bit_scale __snake_case : Dict = latents.to(self.device ) self.scheduler.set_timesteps(UpperCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual __snake_case : Optional[Any] = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 __snake_case : Any = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample __snake_case : List[str] = bits_to_decimal(UpperCAmelCase ) if output_type == "pil": __snake_case : Optional[Any] = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
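# A standalone sketch of the bit-plane encode/decode round trip the pipeline
# above relies on; the helper names here are illustrative, not the module's.
import torch


def to_bits(x: torch.Tensor, bits: int = 8) -> torch.Tensor:
    # Map [0, 1] floats to {-1, 1} bit planes, one plane per bit.
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1)  # [128, 64, ..., 1]
    planes = ((x.unsqueeze(-1) & mask) != 0).float()
    return planes * 2 - 1


def from_bits(planes: torch.Tensor, bits: int = 8) -> torch.Tensor:
    # Invert to_bits: threshold at 0, weight by powers of two, rescale to [0, 1].
    mask = 2 ** torch.arange(bits - 1, -1, -1)
    dec = ((planes > 0).int() * mask).sum(-1)
    return (dec / 255).clamp(0.0, 1.0)


x = torch.rand(2, 3)
assert torch.allclose(from_bits(to_bits(x)), (x * 255).int() / 255)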
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple: # Load configuration defined in the metadata file with open(lowercase ) as metadata_file: __snake_case : int = json.load(lowercase ) __snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] ) # Load in the weights from the checkpoint_path __snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"] # Load the entity vocab file __snake_case : Tuple = load_original_entity_vocab(lowercase ) # add an entry for [MASK2] __snake_case : Optional[int] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 __snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks __snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase ) __snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(lowercase ) with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f: __snake_case : Tuple = json.load(lowercase ) __snake_case : List[Any] = "MLukeTokenizer" with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(lowercase , lowercase ) __snake_case : Any = MLukeTokenizer.from_pretrained(lowercase ) # Initialize the embeddings of the special tokens __snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0] __snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0] __snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"] __snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: __snake_case : List[Any] = state_dict[bias_name] __snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 ) __snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 ) __snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self.""" __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] __snake_case : str = state_dict[prefix + matrix_name] __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"] __snake_case : List[str] = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Any = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' __snake_case : List[Any] = state_dict["entity_predictions.bias"] __snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) __snake_case : Any = LukeForMaskedLM(config=lowercase ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) __snake_case : int = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): __snake_case : str = state_dict[key] else: __snake_case : str = state_dict[key] __snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase ) if set(lowercase ) != {"luke.embeddings.position_ids"}: raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(lowercase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs __snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" ) __snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." __snake_case : Union[str, Any] = (0, 9) __snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : Any = model(**lowercase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : Optional[Any] = torch.Size((1, 33, 768) ) __snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : str = torch.Size((1, 1, 768) ) __snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction __snake_case : str = MLukeTokenizer.from_pretrained(lowercase ) __snake_case : Dict = "Tokyo is the capital of <mask>." 
__snake_case : Union[str, Any] = (24, 30) __snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : int = model(**lowercase ) __snake_case : Dict = encoding["input_ids"][0].tolist() __snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) __snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase ) __snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item() __snake_case : Optional[int] = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase ) ) model.save_pretrained(lowercase ) def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]: __snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"] __snake_case : Any = [json.loads(lowercase ) for line in open(lowercase )] __snake_case : Any = {} for entry in data: __snake_case : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: __snake_case : Optional[int] = entity_id break __snake_case : Union[str, Any] = f"""{language}:{entity_name}""" __snake_case : Any = entity_id return new_mapping if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) _UpperCamelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
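# Hedged illustration of the entity-vocab JSONL format the loader above parses:
# one JSON object per line; each (name, language) pair maps to the entry's id
# under a "language:name" key (special tokens keep their bare name).
import json

line = '{"id": 7, "entities": [["Japan", "en"], ["\\u65e5\\u672c", "ja"]]}'
entry = json.loads(line)
mapping = {f"{lang}:{name}": entry["id"] for name, lang in entry["entities"]}
assert mapping == {"en:Japan": 7, "ja:日本": 7}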
def set_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 1.
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 0.
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    # Toggle the bit at `position`.
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    # True if the bit at `position` is 1.
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    # Return the bit at `position` as 0 or 1.
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
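# A quick usage check of the helpers above; binary literals make the bit
# positions explicit.
assert set_bit(0b1101, 1) == 0b1111    # 13 -> 15
assert clear_bit(0b1111, 1) == 0b1101  # 15 -> 13
assert flip_bit(0b1101, 1) == 0b1111   # toggle bit 1
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0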
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    # Liouville lambda: 1 if `number` has an even count of prime factors
    # (with multiplicity), -1 otherwise.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
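# For example, lambda(4) = 1 (two prime factors: 2 * 2) while lambda(12) = -1
# (three: 2 * 2 * 3); 1 has an empty, even-length factorization.
assert liouville_lambda(4) == 1
assert liouville_lambda(12) == -1
assert liouville_lambda(1) == 1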
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCAmelCase__( lowercase : int ) -> bool: __snake_case : int = int(number**0.5 ) return number == sq * sq def lowerCAmelCase__( lowercase : int , lowercase : int , lowercase : int , lowercase : int , lowercase : int , lowercase : int ) -> tuple[int, int]: __snake_case : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __snake_case : int = x_den * y_den * z_den __snake_case : int = gcd(lowercase , lowercase ) top //= hcf bottom //= hcf return top, bottom def lowerCAmelCase__( lowercase : int = 35 ) -> int: __snake_case : set = set() __snake_case : int __snake_case : Fraction = Fraction(0 ) __snake_case : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 __snake_case : Any = x_num * y_den + x_den * y_num __snake_case : Optional[int] = x_den * y_den __snake_case : Any = gcd(lowercase , lowercase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case : Union[str, Any] = add_three( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) unique_s.add(lowercase ) # n=2 __snake_case : Union[str, Any] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __snake_case : List[str] = x_den * x_den * y_den * y_den if is_sq(lowercase ) and is_sq(lowercase ): __snake_case : List[Any] = int(sqrt(lowercase ) ) __snake_case : List[str] = int(sqrt(lowercase ) ) __snake_case : Any = gcd(lowercase , lowercase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case : Optional[Any] = add_three( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) unique_s.add(lowercase ) # n=-1 __snake_case : Union[str, Any] = x_num * y_num __snake_case : str = x_den * y_num + x_num * y_den __snake_case : List[str] = gcd(lowercase , lowercase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case : str = add_three( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) unique_s.add(lowercase ) # n=2 __snake_case : Any = x_num * x_num * y_num * y_num __snake_case : List[str] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(lowercase ) and is_sq(lowercase ): __snake_case : Union[str, Any] = int(sqrt(lowercase ) ) __snake_case : str = int(sqrt(lowercase ) ) __snake_case : str = gcd(lowercase , lowercase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case : List[str] = add_three( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) unique_s.add(lowercase ) for num, den in unique_s: total += Fraction(lowercase , lowercase ) return total.denominator + total.numerator if __name__ == "__main__": print(F'''{solution() = }''')
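# A quick sanity check of the three-fraction sum used above, reimplemented
# standalone: 1/2 + 1/3 + 1/6 reduces to 1/1.
from math import gcd


def add_three_fractions(x_num, x_den, y_num, y_den, z_num, z_den):
    # Same arithmetic as the helper above: sum three fractions, reduce by gcd.
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    return top // hcf, bottom // hcf


assert add_three_fractions(1, 2, 1, 3, 1, 6) == (1, 1)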
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
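# `calculate_bleu` is imported from the local test utils; a minimal equivalent,
# assuming a sacrebleu-style corpus BLEU wrapper, would be:
#
#   from sacrebleu import corpus_bleu
#
#   def calculate_bleu(output_lns, ref_lns):
#       return {"bleu": round(corpus_bleu(output_lns, [ref_lns]).score, 4)}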
import torch
from torch import nn

from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> str: '''simple docstring''' __snake_case : int = parent __snake_case : Optional[int] = batch_size __snake_case : Union[str, Any] = seq_length __snake_case : Dict = is_training __snake_case : int = use_input_mask __snake_case : Tuple = use_token_type_ids __snake_case : Any = use_labels __snake_case : Union[str, Any] = vocab_size __snake_case : List[Any] = hidden_size __snake_case : Tuple = num_hidden_layers __snake_case : Tuple = num_attention_heads __snake_case : int = intermediate_size __snake_case : int = hidden_act __snake_case : List[Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : Dict = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : str = initializer_range __snake_case : Optional[Any] = num_labels __snake_case : List[Any] = num_choices __snake_case : Dict = scope def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = None if self.use_input_mask: __snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[str] = None __snake_case : List[Any] = None __snake_case : Any = None if self.use_labels: __snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ) -> Any: '''simple docstring''' return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : int = BioGptModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : List[Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase ) __snake_case : str = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[str]: '''simple docstring''' __snake_case : List[Any] = BioGptForCausalLM(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : int = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : str = BioGptModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() # create attention mask __snake_case : int = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase ) __snake_case : int = self.seq_length // 2 __snake_case : Optional[Any] = 0 # first forward pass __snake_case , __snake_case : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids __snake_case : str = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __snake_case : List[Any] = ids_tensor((1,) , UpperCAmelCase ).item() + 1 __snake_case : Dict = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __snake_case : List[str] = random_other_next_tokens # append to next input_ids and attn_mask __snake_case : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case : Dict = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase )] , dim=1 , ) # get two different outputs __snake_case : str = model(UpperCAmelCase , attention_mask=UpperCAmelCase )["last_hidden_state"] __snake_case : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase , attention_mask=UpperCAmelCase )["last_hidden_state"] # select random slice __snake_case : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case : Dict = output_from_no_past[:, -1, random_slice_idx].detach() __snake_case : Optional[int] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[Any] = BioGptModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval() __snake_case : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase ) # first forward pass __snake_case : Dict = model(UpperCAmelCase , 
attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase ) __snake_case , __snake_case : Union[str, Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case : str = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __snake_case : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __snake_case : int = model(UpperCAmelCase , attention_mask=UpperCAmelCase )["last_hidden_state"] __snake_case : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase )[ "last_hidden_state" ] # select random slice __snake_case : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]: '''simple docstring''' __snake_case : str = BioGptForCausalLM(UpperCAmelCase ) model.to(UpperCAmelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() __snake_case : List[Any] = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase ( self , UpperCAmelCase , *UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = BioGptModel(UpperCAmelCase ) __snake_case : Tuple = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.num_labels __snake_case : Tuple = BioGptForTokenClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : Optional[int] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Dict = config_and_inputs __snake_case : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowerCamelCase ( a , a , a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : List[Any] =( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) UpperCAmelCase_ : Union[str, Any] 
=(BioGptForCausalLM,) if is_torch_available() else () UpperCAmelCase_ : Tuple =( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase_ : Optional[int] =False def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[Any] = BioGptModelTester(self ) __snake_case : List[str] = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Optional[int] = type self.model_tester.create_and_check_model(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase , gradient_checkpointing=UpperCAmelCase ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase ) @slow def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : str = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase ) __snake_case : Any = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __snake_case : Dict = "left" # Define PAD Token = EOS Token = 50256 __snake_case : List[Any] = tokenizer.eos_token __snake_case : List[Any] = model.config.eos_token_id # use different length sentences to test batching __snake_case : Optional[int] = [ "Hello, my dog is a little", "Today, I", ] __snake_case : List[Any] = tokenizer(UpperCAmelCase , return_tensors="pt" , padding=UpperCAmelCase ) __snake_case : str = inputs["input_ids"].to(UpperCAmelCase ) __snake_case : Optional[int] = model.generate( input_ids=UpperCAmelCase , attention_mask=inputs["attention_mask"].to(UpperCAmelCase ) , ) __snake_case : List[str] = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCAmelCase ) __snake_case : Union[str, Any] = model.generate(input_ids=UpperCAmelCase ) __snake_case : Optional[Any] = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __snake_case : str = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCAmelCase ) __snake_case : int = 
model.generate(input_ids=UpperCAmelCase , max_length=model.config.max_length - num_paddings ) __snake_case : Dict = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) __snake_case : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase ) __snake_case : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase ) __snake_case : List[str] = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : int = BioGptModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = 3 __snake_case : List[Any] = input_dict["input_ids"] __snake_case : List[Any] = input_ids.ne(1 ).to(UpperCAmelCase ) __snake_case : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case : str = BioGptForSequenceClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : Dict = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = 3 __snake_case : Optional[int] = "multi_label_classification" __snake_case : List[str] = input_dict["input_ids"] __snake_case : Tuple = input_ids.ne(1 ).to(UpperCAmelCase ) __snake_case : Optional[Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __snake_case : List[str] = BioGptForSequenceClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() __snake_case : str = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Dict = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __snake_case : Tuple = torch.tensor([[2, 4805, 9, 656, 21]] ) __snake_case : List[str] = model(UpperCAmelCase )[0] __snake_case : Optional[int] = 42384 __snake_case : Optional[int] = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase ) __snake_case : int = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 ) ) @slow def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Dict = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __snake_case : List[str] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase ) torch.manual_seed(0 ) __snake_case : int = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCAmelCase ) __snake_case 
: List[str] = model.generate( **UpperCAmelCase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=UpperCAmelCase , ) __snake_case : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase ) __snake_case : List[Any] = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCAmelCase , UpperCAmelCase )
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
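# A minimal usage sketch of the exported pipeline; the checkpoint id assumes
# the public Karlo UnCLIP weights.
#
#   from diffusers import UnCLIPPipeline
#
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#   image = pipe("a photo of a corgi").images[0]
#   image.save("corgi.png")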
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _UpperCamelCase = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
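# A minimal sketch (not the transformers implementation) of the lazy-import
# pattern _LazyModule provides: attribute access triggers the submodule import
# on first use, via a module-level __getattr__ (PEP 562).
#
#   import importlib
#
#   _import_structure = {"tokenization_ctrl": ["CTRLTokenizer"]}
#
#   def __getattr__(name):
#       for module_name, symbols in _import_structure.items():
#           if name in symbols:
#               module = importlib.import_module(f".{module_name}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")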
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = torch.device('''cpu''') def lowerCAmelCase__( ) -> Any: __snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]: __snake_case : List[Any] = dct.pop(lowercase ) __snake_case : List[Any] = val def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple: __snake_case : Optional[Any] = [] for k in state_dict.keys(): __snake_case : Union[str, Any] = k if ".pwconv" in k: __snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: __snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: __snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: __snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: __snake_case : int = k_new.split("." ) if ls[2].isdigit(): __snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: __snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]: __snake_case : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __snake_case : Tuple = 1000 __snake_case : Any = "huggingface/label-files" __snake_case : int = "imagenet-1k-id2label.json" __snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) __snake_case : str = {int(lowercase ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __snake_case : Optional[Any] = [3, 3, 6, 4] __snake_case : Optional[int] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __snake_case : List[str] = [3, 3, 9, 6] __snake_case : Optional[Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __snake_case : Optional[int] = [4, 3, 10, 5] __snake_case : Dict = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __snake_case : str = [4, 4, 12, 6] __snake_case : Optional[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): __snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase ) else: __snake_case : Tuple = torch.load(lowercase , map_location="cpu" ) __snake_case : Optional[int] = checkpoint __snake_case : Any = create_rename_keys(lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) # load HuggingFace model __snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval() hf_model.load_state_dict(lowercase ) # prepare test inputs __snake_case : Optional[Any] = prepare_img() __snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" ) __snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" ) # compare outputs from both models __snake_case : str = get_expected_output(lowercase ) __snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _UpperCamelCase = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(UpperCAmelCase ) for s in shape] )}.npy""" def UpperCAmelCase ( self ) -> int: '''simple docstring''' super().tearDown() gc.collect() def UpperCAmelCase ( self , UpperCAmelCase=0 , UpperCAmelCase=(4, 4, 64, 64) , UpperCAmelCase=False ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = jnp.bfloataa if fpaa else jnp.floataa __snake_case : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) , dtype=UpperCAmelCase ) return image def UpperCAmelCase ( self , UpperCAmelCase=False , UpperCAmelCase="CompVis/stable-diffusion-v1-4" ) -> List[Any]: '''simple docstring''' __snake_case : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa __snake_case : int = "bf16" if fpaa else None __snake_case , __snake_case : Optional[Any] = FlaxUNetaDConditionModel.from_pretrained( UpperCAmelCase , subfolder="unet" , dtype=UpperCAmelCase , revision=UpperCAmelCase ) return model, params def UpperCAmelCase ( self , UpperCAmelCase=0 , UpperCAmelCase=(4, 77, 768) , UpperCAmelCase=False ) -> str: '''simple docstring''' __snake_case : str = jnp.bfloataa if fpaa else jnp.floataa __snake_case : int = jnp.array(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) , dtype=UpperCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case , __snake_case : Dict = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=UpperCAmelCase ) __snake_case : Any = self.get_latents(UpperCAmelCase , fpaa=UpperCAmelCase ) __snake_case : Union[str, Any] = self.get_encoder_hidden_states(UpperCAmelCase , fpaa=UpperCAmelCase ) __snake_case : str = model.apply( {"params": params} , UpperCAmelCase , jnp.array(UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase , ).sample assert sample.shape == latents.shape __snake_case : Union[str, Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __snake_case : List[str] = jnp.array(UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, 
-0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: '''simple docstring''' __snake_case , __snake_case : Any = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=UpperCAmelCase ) __snake_case : Any = self.get_latents(UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=UpperCAmelCase ) __snake_case : Union[str, Any] = self.get_encoder_hidden_states(UpperCAmelCase , shape=(4, 77, 1024) , fpaa=UpperCAmelCase ) __snake_case : Dict = model.apply( {"params": params} , UpperCAmelCase , jnp.array(UpperCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=UpperCAmelCase , ).sample assert sample.shape == latents.shape __snake_case : Tuple = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __snake_case : Dict = jnp.array(UpperCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-2 )
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) _UpperCamelCase = logging.getLogger(__name__) def lowerCAmelCase__( lowercase : str ) -> List[str]: __snake_case : int = git.Repo(search_parent_directories=lowercase ) __snake_case : Union[str, Any] = { "repo_id": str(lowercase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f: json.dump(lowercase , lowercase , indent=4 ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]: if params.n_gpu <= 0: __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = -1 __snake_case : Union[str, Any] = True __snake_case : Tuple = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : int = int(os.environ["N_GPU_NODE"] ) __snake_case : Union[str, Any] = int(os.environ["RANK"] ) # number of nodes / node ID __snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node __snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node __snake_case : Union[str, Any] = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __snake_case : Any = 1 __snake_case : str = 0 __snake_case : Optional[Any] = 0 __snake_case : Dict = 0 __snake_case : int = 1 __snake_case : Optional[Any] = 1 __snake_case : Tuple = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0 __snake_case : List[Any] = params.n_nodes > 1 # summary __snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
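# The three helpers above are, upstream, `git_log`, `init_gpu_params` and
# `set_seed`; since they share one name here, only the last definition (the
# seeding helper) is reachable at module level. A minimal, hedged usage
# sketch for it: any object exposing `seed` and `n_gpu` attributes will do.
#
#   from types import SimpleNamespace
#   lowerCAmelCase__(SimpleNamespace(seed=56, n_gpu=0))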
from ....configuration_utils import PretrainedConfig from ....utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : List[Any] ="trajectory_transformer" UpperCAmelCase_ : str =["past_key_values"] UpperCAmelCase_ : Optional[Any] ={ "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , UpperCAmelCase=100 , UpperCAmelCase=5 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=249 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=25 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=128 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0_006 , UpperCAmelCase=512 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-12 , UpperCAmelCase=1 , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=50256 , UpperCAmelCase=50256 , **UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' __snake_case : Tuple = vocab_size __snake_case : Optional[int] = action_weight __snake_case : int = reward_weight __snake_case : List[Any] = value_weight __snake_case : Optional[int] = max_position_embeddings __snake_case : List[Any] = block_size __snake_case : str = action_dim __snake_case : Union[str, Any] = observation_dim __snake_case : Tuple = transition_dim __snake_case : Any = learning_rate __snake_case : Dict = n_layer __snake_case : int = n_head __snake_case : Tuple = n_embd __snake_case : str = embd_pdrop __snake_case : Any = attn_pdrop __snake_case : Tuple = resid_pdrop __snake_case : Any = initializer_range __snake_case : Any = layer_norm_eps __snake_case : List[str] = kaiming_initializer_range __snake_case : Tuple = use_cache super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
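# Hedged usage sketch (assumption: this class is transformers'
# TrajectoryTransformerConfig under an obfuscated name); any architecture
# field can be overridden by keyword:
#
#   config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)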
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : str =JukeboxTokenizer UpperCAmelCase_ : Tuple ={ "artist": "Zac Brown Band", "genres": "Country", "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ", } @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) __snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : Optional[Any] = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 
33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) __snake_case : Tuple = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : int = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 
77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
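# A quick sketch of bisection on a second, hypothetical test function; the
# root of x**2 - 4 on [1, 1000] should come out very close to 2.
def g(x: float) -> float:
    return x**2 - 4


assert abs(bisection(g, 1, 1000) - 2) < 10**-5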
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : str UpperCAmelCase_ : str =None @staticmethod def UpperCAmelCase ( ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if not self.is_available(): raise RuntimeError( F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def UpperCAmelCase ( cls ) -> Tuple: '''simple docstring''' return F"""`pip install {cls.pip_package or cls.name}`""" class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[int] ="optuna" @staticmethod def UpperCAmelCase ( ) -> Union[str, Any]: '''simple docstring''' return is_optuna_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict: '''simple docstring''' return run_hp_search_optuna(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' return default_hp_space_optuna(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : List[str] ="ray" UpperCAmelCase_ : Dict ="'ray[tune]'" @staticmethod def UpperCAmelCase ( ) -> str: '''simple docstring''' return is_ray_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' return run_hp_search_ray(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' return default_hp_space_ray(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Tuple ="sigopt" @staticmethod def UpperCAmelCase ( ) -> int: '''simple docstring''' return is_sigopt_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return run_hp_search_sigopt(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict: '''simple docstring''' return default_hp_space_sigopt(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : str ="wandb" @staticmethod def UpperCAmelCase ( ) -> Optional[Any]: '''simple docstring''' return is_wandb_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return run_hp_search_wandb(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return default_hp_space_wandb(UpperCAmelCase ) _UpperCamelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, 
SigOptBackend, WandbBackend] } def lowerCAmelCase__( ) -> str: __snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowercase ) > 0: __snake_case : Dict = available_backends[0].name if len(lowercase ) > 1: logger.info( f"""{len(lowercase )} hyperparameter search backends available. Using {name} as the default.""" ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _UpperCamelCase = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def lowerCAmelCase__( lowercase : Optional[int] , lowercase : List[str] ) -> Optional[int]: warnings.warn(lowercase , lowercase ) requires_backends(lowercase , "sklearn" ) return (preds == labels).mean() def lowerCAmelCase__( lowercase : Any , lowercase : Optional[Any] ) -> Optional[int]: warnings.warn(lowercase , lowercase ) requires_backends(lowercase , "sklearn" ) __snake_case : Union[str, Any] = simple_accuracy(lowercase , lowercase ) __snake_case : List[str] = fa_score(y_true=lowercase , y_pred=lowercase ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def lowerCAmelCase__( lowercase : List[str] , lowercase : Tuple ) -> Dict: warnings.warn(lowercase , lowercase ) requires_backends(lowercase , "sklearn" ) __snake_case : str = pearsonr(lowercase , lowercase )[0] __snake_case : Any = spearmanr(lowercase , lowercase )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def lowerCAmelCase__( lowercase : Tuple , lowercase : Any , lowercase : List[str] ) -> Tuple: warnings.warn(lowercase , lowercase ) requires_backends(lowercase , "sklearn" ) assert len(lowercase ) == len(lowercase ), f"""Predictions and labels have mismatched lengths {len(lowercase )} and {len(lowercase )}""" if task_name == "cola": return {"mcc": matthews_corrcoef(lowercase , lowercase )} elif task_name == "sst-2": return {"acc": simple_accuracy(lowercase , lowercase )} elif task_name == "mrpc": return acc_and_fa(lowercase , lowercase ) elif task_name == "sts-b": return pearson_and_spearman(lowercase , lowercase ) elif task_name == "qqp": return acc_and_fa(lowercase , lowercase ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(lowercase , lowercase )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(lowercase , lowercase )} elif task_name == "qnli": return {"acc": simple_accuracy(lowercase , lowercase )} elif task_name == "rte": return {"acc": simple_accuracy(lowercase , lowercase )} elif task_name == "wnli": return {"acc": simple_accuracy(lowercase , lowercase )} elif task_name == "hans": return {"acc": simple_accuracy(lowercase , lowercase )} else: raise KeyError(lowercase ) def lowerCAmelCase__( lowercase : int , lowercase : Optional[int] , lowercase : str ) -> List[Any]: warnings.warn(lowercase , lowercase ) requires_backends(lowercase , "sklearn" ) if len(lowercase ) != len(lowercase ): raise ValueError(f"""Predictions and labels have mismatched lengths {len(lowercase )} and {len(lowercase )}""" ) if task_name == "xnli": return {"acc": simple_accuracy(lowercase , lowercase )} else: raise KeyError(lowercase )
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(
    array: list, first_index: int, middle_index: int, last_index: int
) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(
    array: list, start: int, end: int, size_threshold: int, max_depth: int
) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
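# Minimal usage sketch for the restored names above: `sort` runs introsort
# end-to-end, switching to heapsort when the recursion depth budget runs out
# and to insertion sort on slices at or below the size threshold.
_example = [4.0, 2.0, 6.0, 8.0, 1.0, 7.0, 23.0, 45.0, 14.0, 12.0]
assert sort(_example) == sorted(_example)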
import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : List[str] ="MCTCTFeatureExtractor" UpperCAmelCase_ : List[Any] ="AutoTokenizer" def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> str: '''simple docstring''' super().__init__(UpperCAmelCase , UpperCAmelCase ) __snake_case : int = self.feature_extractor __snake_case : int = False def __call__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*UpperCAmelCase , **UpperCAmelCase ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) __snake_case : Union[str, Any] = kwargs.pop("raw_speech" ) else: __snake_case : Optional[Any] = kwargs.pop("audio" , UpperCAmelCase ) __snake_case : Dict = kwargs.pop("sampling_rate" , UpperCAmelCase ) __snake_case : Optional[Any] = kwargs.pop("text" , UpperCAmelCase ) if len(UpperCAmelCase ) > 0: __snake_case : List[Any] = args[0] __snake_case : Optional[int] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: __snake_case : str = self.feature_extractor(UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase ) if text is not None: __snake_case : Tuple = self.tokenizer(UpperCAmelCase , **UpperCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: __snake_case : str = encodings["input_ids"] return inputs def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Dict: '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*UpperCAmelCase , **UpperCAmelCase ) __snake_case : str = kwargs.pop("input_features" , UpperCAmelCase ) __snake_case : int = kwargs.pop("labels" , UpperCAmelCase ) if len(UpperCAmelCase ) > 0: __snake_case : Optional[Any] = args[0] __snake_case : Optional[Any] = args[1:] if input_features is not None: __snake_case : Optional[Any] = self.feature_extractor.pad(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) if labels is not None: __snake_case : List[str] = self.tokenizer.pad(UpperCAmelCase , **UpperCAmelCase ) if labels is None: return input_features elif input_features is None: return labels else: __snake_case : Dict = labels["input_ids"] return input_features def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @contextmanager def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) __snake_case : List[str] = True __snake_case : Dict = self.tokenizer yield __snake_case : Dict = self.feature_extractor __snake_case : Dict = False
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def lowerCAmelCase__( lowercase : Dict ) -> str: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def lowerCAmelCase__( ) -> List[Any]: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case : Any = [1, 2, 3] with pytest.raises(lowercase ): with parallel_backend("unsupported backend" ): map_nested(lowercase , lowercase , num_proc=2 ) with pytest.raises(lowercase ): with parallel_backend("unsupported backend" ): map_nested(lowercase , lowercase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def lowerCAmelCase__( lowercase : Dict ) -> Dict: __snake_case : Any = [1, 2] __snake_case : Dict = {"a": 1, "b": 2} __snake_case : Optional[int] = {"a": [1, 2], "b": [3, 4]} __snake_case : int = {"a": {"1": 1}, "b": 2} __snake_case : str = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case : Dict = [2, 3] __snake_case : Tuple = {"a": 2, "b": 3} __snake_case : int = {"a": [2, 3], "b": [4, 5]} __snake_case : Dict = {"a": {"1": 2}, "b": 3} __snake_case : str = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
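# Brute-force cross-check of the closed form above; both give 2640 for
# n = 10, the worked example from the problem statement.
assert solution(10) == sum(range(1, 11)) ** 2 - sum(i**2 for i in range(1, 11)) == 2640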
import math import random from typing import Any from .hill_climbing import SearchProblem def lowerCAmelCase__( lowercase : Dict , lowercase : bool = True , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : bool = False , lowercase : float = 100 , lowercase : float = 0.0_1 , lowercase : float = 1 , ) -> Any: __snake_case : Optional[Any] = False __snake_case : Optional[Any] = search_prob __snake_case : str = start_temperate __snake_case : List[Any] = [] __snake_case : str = 0 __snake_case : Dict = None while not search_end: __snake_case : List[Any] = current_state.score() if best_state is None or current_score > best_state.score(): __snake_case : List[Any] = current_state scores.append(lowercase ) iterations += 1 __snake_case : Dict = None __snake_case : str = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __snake_case : Any = random.randint(0 , len(lowercase ) - 1 ) # picking a random neighbor __snake_case : int = neighbors.pop(lowercase ) __snake_case : Optional[Any] = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __snake_case : Any = change * -1 # in case we are finding minimum if change > 0: # improves the solution __snake_case : List[str] = picked_neighbor else: __snake_case : Optional[Any] = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __snake_case : str = picked_neighbor __snake_case : Optional[Any] = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __snake_case : Optional[Any] = True else: __snake_case : str = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(lowercase ) , lowercase ) plt.xlabel("Iterations" ) plt.ylabel("Function values" ) plt.show() return best_state if __name__ == "__main__": def lowerCAmelCase__( lowercase : List[str] , lowercase : Tuple ) -> str: return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) _UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) _UpperCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ''' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) def lowerCAmelCase__( lowercase : Any , lowercase : Union[str, Any] ) -> Any: return (3 * x**2) - (6 * y) _UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing(prob, find_max=False, visualization=True) print( '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' F'''{local_min.score()}''' ) 
_UpperCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _UpperCamelCase = simulated_annealing(prob, find_max=True, visualization=True) print( '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ''' F'''{local_min.score()}''' )
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
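# For positive integers the recursion reduces to a shifted factorial, so
# gamma(5) is 4! = 24; the result is exact in floating point.
assert gamma(5) == 24.0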
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"] UpperCAmelCase_ : Tuple ="FlavaImageProcessor" UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast") def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) __snake_case : List[Any] = kwargs.pop("feature_extractor" ) __snake_case : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = self.image_processor def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." 
) if text is not None: __snake_case : Union[str, Any] = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if images is not None: __snake_case : Union[str, Any] = self.image_processor( UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if text is not None and images is not None: encoding.update(UpperCAmelCase ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[Any] = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (col == num_cols) or (col == num_cols - 1) and (
            row >= num_rows - num_shaded_boxes
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
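# Round-trip sketch: decrypting with the same key recovers the original
# message (this assumes a key in the valid range [2, len(message) - 1]).
_msg = "Common sense is not so common."
assert decrypt_message(8, encrypt_message(8, _msg)) == _msg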
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _UpperCamelCase = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', } } _UpperCamelCase = { '''camembert-base''': 512, } _UpperCamelCase = '''▁''' class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =VOCAB_FILES_NAMES UpperCAmelCase_ : str =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : str =["input_ids", "attention_mask"] def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: '''simple docstring''' __snake_case : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token __snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) __snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase ) ) __snake_case : Dict = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __snake_case : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __snake_case : Optional[int] = len(self.fairseq_tokens_to_ids ) __snake_case : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __snake_case : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Dict = [self.cls_token_id] __snake_case : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1] def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return 
len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(UpperCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = [] __snake_case : Union[str, Any] = "" __snake_case : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase ) + token __snake_case : List[Any] = True __snake_case : Union[str, Any] = [] else: current_sub_tokens.append(UpperCAmelCase ) __snake_case : int = False out_string += self.sp_model.decode(UpperCAmelCase ) return out_string.strip() def __getstate__( self ) -> List[Any]: '''simple docstring''' __snake_case : str = self.__dict__.copy() __snake_case : Optional[Any] = None return state def __setstate__( self , UpperCAmelCase ) -> str: '''simple docstring''' __snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case : List[str] = {} __snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __snake_case : Optional[Any] = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , "wb" ) as fi: __snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,)
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = torch.device('''cpu''') def lowerCAmelCase__( ) -> Any: __snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]: __snake_case : List[Any] = dct.pop(lowercase ) __snake_case : List[Any] = val def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple: __snake_case : Optional[Any] = [] for k in state_dict.keys(): __snake_case : Union[str, Any] = k if ".pwconv" in k: __snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: __snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: __snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: __snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: __snake_case : int = k_new.split("." ) if ls[2].isdigit(): __snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: __snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]: __snake_case : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __snake_case : Tuple = 1000 __snake_case : Any = "huggingface/label-files" __snake_case : int = "imagenet-1k-id2label.json" __snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) __snake_case : str = {int(lowercase ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __snake_case : Optional[Any] = [3, 3, 6, 4] __snake_case : Optional[int] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __snake_case : List[str] = [3, 3, 9, 6] __snake_case : Optional[Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __snake_case : Optional[int] = [4, 3, 10, 5] __snake_case : Dict = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __snake_case : str = [4, 4, 12, 6] __snake_case : Optional[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): __snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase ) else: __snake_case : Tuple = torch.load(lowercase , map_location="cpu" ) __snake_case : Optional[int] = checkpoint __snake_case : Any = create_rename_keys(lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) # load HuggingFace model __snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval() hf_model.load_state_dict(lowercase ) # prepare test inputs __snake_case : Optional[Any] = prepare_img() __snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" ) __snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" ) # compare outputs from both models __snake_case : str = get_expected_output(lowercase ) __snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _UpperCamelCase = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
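# Usage sketch: [3, 34, 4, 12, 5, 2] contains a subset summing to 9
# (for instance 4 + 5) but no subset summing to 30.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)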
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''} class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Any ="openai-gpt" UpperCAmelCase_ : Union[str, Any] ={ "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , UpperCAmelCase=40478 , UpperCAmelCase=512 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1E-5 , UpperCAmelCase=0.02 , UpperCAmelCase="cls_index" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=0.1 , **UpperCAmelCase , ) -> str: '''simple docstring''' __snake_case : Optional[int] = vocab_size __snake_case : Dict = n_positions __snake_case : Union[str, Any] = n_embd __snake_case : int = n_layer __snake_case : Optional[int] = n_head __snake_case : Dict = afn __snake_case : Dict = resid_pdrop __snake_case : Optional[Any] = embd_pdrop __snake_case : List[Any] = attn_pdrop __snake_case : Union[str, Any] = layer_norm_epsilon __snake_case : Union[str, Any] = initializer_range __snake_case : Optional[int] = summary_type __snake_case : List[str] = summary_use_proj __snake_case : Any = summary_activation __snake_case : List[Any] = summary_first_dropout __snake_case : Union[str, Any] = summary_proj_to_labels super().__init__(**UpperCAmelCase )
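# Hedged usage sketch (assumption: this class is transformers'
# OpenAIGPTConfig); the defaults reproduce the original GPT-1 stack of
# 12 layers, 12 heads and 768-dimensional embeddings:
#
#   config = OpenAIGPTConfig(n_layer=6)  # smaller, hypothetical variant
#   assert config.n_embd == 768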
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node _UpperCamelCase = 4 _UpperCamelCase = 3 class _lowerCamelCase ( a ): """simple docstring""" pass def lowerCAmelCase__( lowercase : List[str] ) -> Any: for shard in shards: for i in range(lowercase ): yield {"i": i, "shard": shard} def lowerCAmelCase__( ) -> Optional[int]: __snake_case : List[Any] = int(os.environ["RANK"] ) __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : List[str] = ArgumentParser() parser.add_argument("--streaming" , type=lowercase ) parser.add_argument("--local_rank" , type=lowercase ) parser.add_argument("--num_workers" , type=lowercase , default=0 ) __snake_case : Any = parser.parse_args() __snake_case : Dict = args.streaming __snake_case : Union[str, Any] = args.num_workers __snake_case : Any = {"shards": [f"""shard_{shard_idx}""" for shard_idx in range(lowercase )]} __snake_case : Optional[int] = IterableDataset.from_generator(lowercase , gen_kwargs=lowercase ) if not streaming: __snake_case : Any = Dataset.from_list(list(lowercase ) ) __snake_case : Dict = split_dataset_by_node(lowercase , rank=lowercase , world_size=lowercase ) __snake_case : Union[str, Any] = torch.utils.data.DataLoader(lowercase , num_workers=lowercase ) __snake_case : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD __snake_case : List[str] = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) __snake_case : Dict = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
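# This script expects the RANK and WORLD_SIZE environment variables that a
# distributed launcher sets, e.g. (hypothetical file name; exact flags depend
# on your launcher version):
#
#   torchrun --nproc_per_node=2 test_split_by_node.py --streaming True
#
# Each worker then checks that split_dataset_by_node hands it its expected
# share of the 4 shards x 3 items per shard.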
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxMT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 needs y > d, and n > 0 needs y < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
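# --- cross-check sketch (editor addition, not in the original file) ----------
# Writing the progression as x = y + d, z = y - d gives
# x**2 - y**2 - z**2 = y * (4*d - y), so solution counts for small limits can
# also be obtained directly. This helper is a sanity check only.
def brute_force_count(limit: int = 1_000) -> int:
    frequency = [0] * limit
    for y in range(1, limit):  # y divides n and n >= y, so y < limit suffices
        for d in range(1, y):  # z = y - d must stay positive
            n = y * (4 * d - y)
            if 0 < n < limit:
                frequency[n] += 1
    return sum(1 for x in frequency if x == 10)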
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
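# --- usage sketch (editor addition) ------------------------------------------
# One pass over a small literal input; `_dnf_demo` is a hypothetical helper,
# not part of the original file.
def _dnf_demo() -> None:
    assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]
    assert dutch_national_flag_sort([]) == []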
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because the empty string has the empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
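# --- usage sketch (editor addition) ------------------------------------------
# A case small enough to verify by hand: "abc" over {"a", "ab", "bc", "c"} has
# exactly two decompositions, which is what the table construction returns.
# `_all_construct_demo` is a hypothetical helper.
def _all_construct_demo() -> None:
    ways = all_construct("abc", ["a", "ab", "bc", "c"])
    assert sorted(ways) == [["a", "bc"], ["ab", "c"]]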
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : str UpperCAmelCase_ : str =None @staticmethod def UpperCAmelCase ( ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if not self.is_available(): raise RuntimeError( F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def UpperCAmelCase ( cls ) -> Tuple: '''simple docstring''' return F"""`pip install {cls.pip_package or cls.name}`""" class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[int] ="optuna" @staticmethod def UpperCAmelCase ( ) -> Union[str, Any]: '''simple docstring''' return is_optuna_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict: '''simple docstring''' return run_hp_search_optuna(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' return default_hp_space_optuna(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : List[str] ="ray" UpperCAmelCase_ : Dict ="'ray[tune]'" @staticmethod def UpperCAmelCase ( ) -> str: '''simple docstring''' return is_ray_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' return run_hp_search_ray(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' return default_hp_space_ray(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Tuple ="sigopt" @staticmethod def UpperCAmelCase ( ) -> int: '''simple docstring''' return is_sigopt_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return run_hp_search_sigopt(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict: '''simple docstring''' return default_hp_space_sigopt(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : str ="wandb" @staticmethod def UpperCAmelCase ( ) -> Optional[Any]: '''simple docstring''' return is_wandb_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return run_hp_search_wandb(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return default_hp_space_wandb(UpperCAmelCase ) _UpperCamelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, 
SigOptBackend, WandbBackend] } def lowerCAmelCase__( ) -> str: __snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowercase ) > 0: __snake_case : Dict = available_backends[0].name if len(lowercase ) > 1: logger.info( f"""{len(lowercase )} hyperparameter search backends available. Using {name} as the default.""" ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=56 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = parent __snake_case : Tuple = batch_size __snake_case : List[str] = seq_length __snake_case : Optional[int] = is_training __snake_case : int = use_attention_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : Any = use_labels __snake_case : List[str] = vocab_size __snake_case : int = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : List[Any] = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : Dict = initializer_range __snake_case : List[Any] = num_choices __snake_case : Union[str, Any] = rescale_embeddings __snake_case : List[Any] = attention_type __snake_case : str = use_bias __snake_case : Dict = block_size __snake_case : Optional[Any] = num_random_blocks def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_attention_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Union[str, Any] = None if self.use_token_type_ids: __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase ( self ) -> Any: '''simple 
docstring''' __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs __snake_case : int = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class _lowerCamelCase ( a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) UpperCAmelCase_ : Dict =False UpperCAmelCase_ : str =False def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Dict = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Any: '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_hidden_states_output() @slow def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __snake_case : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = model_class(UpperCAmelCase ) @jax.jit def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) with self.subTest("JIT Enabled" ): __snake_case : int = model_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __snake_case : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> int: '''simple docstring''' if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] _UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def lowerCAmelCase__( lowercase : str ) -> Optional[Any]: __snake_case : Optional[int] = torch.load(lowercase , map_location="cpu" ) return sd def lowerCAmelCase__( lowercase : List[Any] , lowercase : List[Any] , lowercase : List[Any]=rename_keys_prefix ) -> Dict: __snake_case : Tuple = OrderedDict() __snake_case : str = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __snake_case : Optional[Any] = key for name_pair in rename_keys_prefix: __snake_case : List[str] = new_key.replace(name_pair[0] , name_pair[1] ) __snake_case : List[str] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __snake_case : List[Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : Any ) -> List[Any]: assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: __snake_case : Any = "pretraining" if "vcr" in checkpoint_path: __snake_case : Optional[Any] = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: __snake_case : Tuple = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: __snake_case : Dict = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: __snake_case : Any = {"visual_embedding_dim": 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: __snake_case : Dict = {"visual_embedding_dim": 512} __snake_case : Any = "multichoice" elif "vqa_advanced" in checkpoint_path: __snake_case : List[Any] = {"visual_embedding_dim": 2048} __snake_case : Optional[Any] = "vqa_advanced" elif "vqa" in checkpoint_path: __snake_case : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129} __snake_case : Union[str, Any] = "vqa" elif "nlvr" in checkpoint_path: __snake_case : Tuple = { "visual_embedding_dim": 1024, "num_labels": 2, } __snake_case : List[Any] = "nlvr" __snake_case : Union[str, Any] = VisualBertConfig(**lowercase ) # Load State Dict __snake_case : Any = load_state_dict(lowercase ) __snake_case : Dict = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": __snake_case : Optional[Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": __snake_case : Tuple = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": 
__snake_case : Tuple = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": __snake_case : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') _UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    print(zeller(args.date_input))
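# --- usage sketch (editor addition) ------------------------------------------
# A single known date is enough to demonstrate the function, since it already
# cross-checks its own arithmetic against datetime (2000-01-01 was a Saturday).
# `_zeller_demo` is a hypothetical helper.
def _zeller_demo() -> None:
    assert zeller("01-01-2000") == "Your date 01-01-2000, is a Saturday!"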
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _UpperCamelCase = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' _UpperCamelCase = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' _UpperCamelCase = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = 
datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : int ) -> int: return float((preds == labels).mean() ) def lowerCAmelCase__( lowercase : List[str] , lowercase : Union[str, Any] , lowercase : Optional[Any]="binary" ) -> Optional[int]: __snake_case : Optional[int] = simple_accuracy(lowercase , lowercase ) __snake_case : Any = float(fa_score(y_true=lowercase , y_pred=lowercase , average=lowercase ) ) return { "accuracy": acc, "f1": fa, } def lowerCAmelCase__( lowercase : Any , lowercase : Optional[int] ) -> List[str]: __snake_case : Any = {} for id_pred, label in zip(lowercase , lowercase ): __snake_case : Dict = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}""" __snake_case : List[str] = id_pred["prediction"] if question_id in question_map: question_map[question_id].append((pred, label) ) else: __snake_case : Tuple = [(pred, label)] __snake_case , __snake_case : List[Any] = [], [] for question, preds_labels in question_map.items(): __snake_case , __snake_case : Dict = zip(*lowercase ) __snake_case : Tuple = fa_score(y_true=lowercase , y_pred=lowercase , average="macro" ) fas.append(lowercase ) __snake_case : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase ) ) ems.append(lowercase ) __snake_case : Any = float(sum(lowercase ) / len(lowercase ) ) __snake_case : Optional[Any] = sum(lowercase ) / len(lowercase ) __snake_case : Tuple = float(fa_score(y_true=lowercase , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCamelCase ( datasets.Metric ): """simple docstring""" def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "prediction_text": datasets.Value("string" ), }, "references": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "answers": datasets.Sequence(datasets.Value("string" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("int64" ), "paragraph": datasets.Value("int64" ), "question": datasets.Value("int64" ), }, "prediction": datasets.Value("int64" ), }, "references": datasets.Value("int64" ), } else: return { "predictions": datasets.Value("int64" ), "references": datasets.Value("int64" ), } def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Tuple: '''simple docstring''' if self.config_name == "axb": return 
{"matthews_correlation": matthews_corrcoef(UpperCAmelCase , UpperCAmelCase )} elif self.config_name == "cb": return acc_and_fa(UpperCAmelCase , UpperCAmelCase , fa_avg="macro" ) elif self.config_name == "record": __snake_case : Union[str, Any] = [ { "qas": [ {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]} for ref in references ] } ] __snake_case : List[str] = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions} return evaluate_record(UpperCAmelCase , UpperCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(UpperCAmelCase , UpperCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(UpperCAmelCase , UpperCAmelCase )} else: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
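# --- cross-check sketch (editor addition) ------------------------------------
# The include/exclude recursion enumerates the same C(n, r) subsets as
# itertools.combinations, so a count check makes a handy sanity test.
# `_combinations_cross_check` is a hypothetical helper.
def _combinations_cross_check() -> None:
    from itertools import combinations

    combos = list(combinations([10, 20, 30, 40, 50], 3))
    assert len(combos) == 10  # C(5, 3) = 10 lines printed by the driver above
    assert combos[0] == (10, 20, 30)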
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') _UpperCamelCase = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} _UpperCamelCase = '''>>zh<<''' _UpperCamelCase = '''Helsinki-NLP/''' if is_torch_available(): _UpperCamelCase = '''pt''' elif is_tf_available(): _UpperCamelCase = '''tf''' else: _UpperCamelCase = '''jax''' @require_sentencepiece class _lowerCamelCase ( a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Optional[int] =MarianTokenizer UpperCAmelCase_ : str =False UpperCAmelCase_ : Dict =True def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().setUp() __snake_case : Optional[int] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] __snake_case : Any = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) __snake_case : Optional[int] = Path(self.tmpdirname ) save_json(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["target_spm"] ) __snake_case : Union[str, Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase ( self , **UpperCAmelCase ) -> MarianTokenizer: '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return ( "This is a test", "This is a test", ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : Union[str, Any] = "</s>" __snake_case : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(UpperCAmelCase ) , 9 ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : str = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" ) __snake_case : int = en_de_tokenizer(["I am a small frog"] , return_tensors=UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) __snake_case : Dict = [38, 121, 14, 697, 38848, 0] self.assertListEqual(UpperCAmelCase , batch.input_ids[0] ) __snake_case : Dict = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCAmelCase ) __snake_case : Tuple = [x.name for x in Path(UpperCAmelCase ).glob("*" )] self.assertIn("source.spm" , UpperCAmelCase ) 
MarianTokenizer.from_pretrained(UpperCAmelCase ) def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : List[str] = self.get_tokenizer() __snake_case : Optional[Any] = tok( ["I am a small frog" * 1000, "I am a small frog"] , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : List[str] = self.get_tokenizer() __snake_case : str = tok(["I am a tiny frog", "I am a small frog"] , padding=UpperCAmelCase , return_tensors=UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : List[Any] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) __snake_case : Any = "Tämä on testi" __snake_case : Tuple = "This is a test" __snake_case : Tuple = [76, 7, 2047, 2] __snake_case : Dict = [69, 12, 11, 940, 2] __snake_case : int = tokenizer(UpperCAmelCase ).input_ids self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) __snake_case : Union[str, Any] = tokenizer(text_target=UpperCAmelCase ).input_ids self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) __snake_case : List[Any] = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) self.assertEqual(UpperCAmelCase , UpperCAmelCase )
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple: # Load configuration defined in the metadata file with open(lowercase ) as metadata_file: __snake_case : int = json.load(lowercase ) __snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] ) # Load in the weights from the checkpoint_path __snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"] # Load the entity vocab file __snake_case : Tuple = load_original_entity_vocab(lowercase ) # add an entry for [MASK2] __snake_case : Optional[int] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 __snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks __snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase ) __snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(lowercase ) with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f: __snake_case : Tuple = json.load(lowercase ) __snake_case : List[Any] = "MLukeTokenizer" with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(lowercase , lowercase ) __snake_case : Any = MLukeTokenizer.from_pretrained(lowercase ) # Initialize the embeddings of the special tokens __snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0] __snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0] __snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"] __snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: __snake_case : List[Any] = state_dict[bias_name] __snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 ) __snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 ) __snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self.""" __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] __snake_case : str = state_dict[prefix + matrix_name] __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"] __snake_case : List[str] = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Any = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' __snake_case : List[Any] = state_dict["entity_predictions.bias"] __snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) __snake_case : Any = LukeForMaskedLM(config=lowercase ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) __snake_case : int = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): __snake_case : str = state_dict[key] else: __snake_case : str = state_dict[key] __snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase ) if set(lowercase ) != {"luke.embeddings.position_ids"}: raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(lowercase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs __snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" ) __snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." __snake_case : Union[str, Any] = (0, 9) __snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : Any = model(**lowercase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : Optional[Any] = torch.Size((1, 33, 768) ) __snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : str = torch.Size((1, 1, 768) ) __snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction __snake_case : str = MLukeTokenizer.from_pretrained(lowercase ) __snake_case : Dict = "Tokyo is the capital of <mask>." 
__snake_case : Union[str, Any] = (24, 30) __snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : int = model(**lowercase ) __snake_case : Dict = encoding["input_ids"][0].tolist() __snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) __snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase ) __snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item() __snake_case : Optional[int] = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase ) ) model.save_pretrained(lowercase ) def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]: __snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"] __snake_case : Any = [json.loads(lowercase ) for line in open(lowercase )] __snake_case : Any = {} for entry in data: __snake_case : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: __snake_case : Optional[int] = entity_id break __snake_case : Union[str, Any] = f"""{language}:{entity_name}""" __snake_case : Any = entity_id return new_mapping if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) _UpperCamelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(
        tokenizer.encode(masked_input, add_special_tokens=True)
    ).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
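# --- alternative sketch (editor addition, not in the original script) --------
# The same top-k fill-mask can be expressed with transformers' pipeline API;
# left commented out here so the module does not load the model twice:
#
#   from transformers import pipeline
#
#   camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
#   print(camembert_fill_mask("Le camembert est <mask> :)", top_k=3))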
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
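# Worked example for the function above (a sketch; it assumes
# maths.prime_factors.prime_factors returns the prime factors with
# multiplicity):
#   24 = 2 * 2 * 2 * 3  -> 4 factors (even count) -> result is  1
#   30 = 2 * 3 * 5      -> 3 factors (odd count)  -> result is -1
# i.e. the function computes the Liouville function lambda(n) = (-1)**Omega(n).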
import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class SHAaHash:
    """simple docstring"""

    def __init__(self, data) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b) -> int:
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block) -> list:
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaHash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
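# Minimal self-check for the SHA-1 class above, against the standard library's
# reference implementation (only hashlib is assumed; the helper name is ours):
def _sha1_self_check() -> None:
    import hashlib

    for message in (b"", b"abc", b"Test String"):
        assert SHAaHash(message).final_hash() == hashlib.sha1(message).hexdigest()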
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING _UpperCamelCase = logging.get_logger(__name__) @add_end_docstrings(a ) class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , **UpperCAmelCase ) -> Any: '''simple docstring''' super().__init__(**UpperCAmelCase ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" ) self.check_model_type(UpperCAmelCase ) def UpperCAmelCase ( self , **UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : Dict = {} __snake_case : int = {} __snake_case : Any = {} # preprocess args if "points_per_batch" in kwargs: __snake_case : List[str] = kwargs["points_per_batch"] if "points_per_crop" in kwargs: __snake_case : Optional[Any] = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: __snake_case : str = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: __snake_case : Dict = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: __snake_case : Optional[int] = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: __snake_case : List[str] = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: __snake_case : Optional[int] = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: __snake_case : Tuple = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: __snake_case : Optional[Any] = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: __snake_case : str = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: __snake_case : int = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: __snake_case : int = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , UpperCAmelCase , *UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' return super().__call__(UpperCAmelCase , *UpperCAmelCase , num_workers=UpperCAmelCase , batch_size=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=64 , UpperCAmelCase = 0 , UpperCAmelCase = 512 / 1500 , UpperCAmelCase = 32 , UpperCAmelCase = 1 , ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = load_image(UpperCAmelCase ) __snake_case : Optional[int] = self.image_processor.size["longest_edge"] __snake_case , __snake_case , __snake_case , __snake_case : Any = self.image_processor.generate_crop_boxes( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) __snake_case : Optional[int] = self.image_processor(images=UpperCAmelCase , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": __snake_case : Any = self.get_inference_context() with inference_context(): __snake_case : List[str] = self._ensure_tensor_on_device(UpperCAmelCase , device=self.device ) __snake_case : Optional[Any] = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) ) __snake_case : Union[str, Any] = image_embeddings __snake_case : Union[str, Any] = grid_points.shape[1] __snake_case : Tuple = points_per_batch if points_per_batch 
is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None" ) for i in range(0 , UpperCAmelCase , UpperCAmelCase ): __snake_case : Tuple = grid_points[:, i : i + points_per_batch, :, :] __snake_case : Union[str, Any] = input_labels[:, i : i + points_per_batch] __snake_case : List[str] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=0.88 , UpperCAmelCase=0.95 , UpperCAmelCase=0 , UpperCAmelCase=1 , ) -> List[Any]: '''simple docstring''' __snake_case : Dict = model_inputs.pop("input_boxes" ) __snake_case : str = model_inputs.pop("is_last" ) __snake_case : List[Any] = model_inputs.pop("original_sizes" ).tolist() __snake_case : Union[str, Any] = model_inputs.pop("reshaped_input_sizes" ).tolist() __snake_case : List[Any] = self.model(**UpperCAmelCase ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __snake_case : Optional[int] = model_outputs["pred_masks"] __snake_case : Optional[int] = self.image_processor.post_process_masks( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , binarize=UpperCAmelCase ) __snake_case : Any = model_outputs["iou_scores"] __snake_case , __snake_case , __snake_case : Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=0.7 , ) -> int: '''simple docstring''' __snake_case : Optional[int] = [] __snake_case : str = [] __snake_case : Dict = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) __snake_case : List[Any] = torch.cat(UpperCAmelCase ) __snake_case : int = torch.cat(UpperCAmelCase ) __snake_case , __snake_case , __snake_case , __snake_case : Tuple = self.image_processor.post_process_for_mask_generation( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) __snake_case : int = defaultdict(UpperCAmelCase ) for output in model_outputs: for k, v in output.items(): extra[k].append(UpperCAmelCase ) __snake_case : Optional[Any] = {} if output_rle_mask: __snake_case : List[Any] = rle_mask if output_bboxes_mask: __snake_case : int = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
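# Hedged usage sketch for the mask-generation pipeline above. The task string
# and checkpoint name follow transformers conventions but are assumptions here,
# not taken from this file; running this downloads a SAM checkpoint.
def _mask_generation_demo():
    from transformers import pipeline

    generator = pipeline("mask-generation", model="facebook/sam-vit-base")
    outputs = generator(
        "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
    )
    # outputs["masks"] is a list of binary masks; outputs["scores"] holds the
    # per-mask IoU scores produced by the postprocess step above.
    return outputs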
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=768 ) -> List[str]: '''simple docstring''' super().__init__(UpperCAmelCase ) __snake_case : Optional[int] = proj_size __snake_case : str = CLIPVisionModel(UpperCAmelCase ) __snake_case : Tuple = PaintByExampleMapper(UpperCAmelCase ) __snake_case : Union[str, Any] = nn.LayerNorm(config.hidden_size ) __snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling __snake_case : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]: '''simple docstring''' __snake_case : int = self.model(pixel_values=UpperCAmelCase ) __snake_case : Optional[int] = clip_output.pooler_output __snake_case : Any = self.mapper(latent_states[:, None] ) __snake_case : Any = self.final_layer_norm(UpperCAmelCase ) __snake_case : str = self.proj_out(UpperCAmelCase ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' super().__init__() __snake_case : List[Any] = (config.num_hidden_layers + 1) // 5 __snake_case : Dict = config.hidden_size __snake_case : str = 1 __snake_case : List[Any] = nn.ModuleList( [ BasicTransformerBlock(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , activation_fn="gelu" , attention_bias=UpperCAmelCase ) for _ in range(UpperCAmelCase ) ] ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' for block in self.blocks: __snake_case : int = block(UpperCAmelCase ) return hidden_states
from __future__ import annotations


def is_9_pandigital(number: int) -> bool:
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
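# Why those two multipliers (a short worked example): for a 4-digit n whose
# double has 5 digits, the concatenated product of n with (1, 2) is
# str(n) + str(2 * n) = n * 10**5 + 2 * n = n * 100002; likewise, for a 3-digit
# n, concatenating n, 2n, 3n gives n * 10**6 + 2n * 10**3 + 3n = n * 1002003.
# E.g. 9327 * 100002 = 932718654, the concatenation of 9327 and 18654.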
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class _lowerCamelCase ( a , a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Dict =( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase_ : Tuple =( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase_ : Optional[int] =False UpperCAmelCase_ : str =False def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Any: '''simple docstring''' __snake_case : Dict = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) if return_labels: if model_class in get_values(UpperCAmelCase ): __snake_case : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = parent __snake_case : Union[str, Any] = batch_size __snake_case : Any = seq_length __snake_case : Union[str, Any] = is_training __snake_case : Tuple = use_input_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : Tuple = use_labels __snake_case : List[str] = vocab_size __snake_case : int = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : Optional[Any] = intermediate_size __snake_case : Optional[int] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : List[str] = max_position_embeddings __snake_case : Optional[Any] = type_vocab_size __snake_case : Any = type_sequence_label_size __snake_case : List[Any] = initializer_range __snake_case : int = num_labels __snake_case : str = num_choices __snake_case : Any = scope __snake_case : str = embedding_size 
def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : List[str] = None if self.use_input_mask: __snake_case : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Union[str, Any] = None __snake_case : str = None __snake_case : List[Any] = None if self.use_labels: __snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : Tuple = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = TFMobileBertModel(config=UpperCAmelCase ) __snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : Tuple = model(UpperCAmelCase ) __snake_case : int = [input_ids, input_mask] __snake_case : str = model(UpperCAmelCase ) __snake_case : Optional[int] = model(UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[Any] = TFMobileBertForMaskedLM(config=UpperCAmelCase ) __snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : Dict = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase ) __snake_case : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : Tuple = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : str = TFMobileBertForPreTraining(config=UpperCAmelCase ) __snake_case : Union[str, Any] = 
{"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : Any = model(UpperCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : List[Any] = TFMobileBertForSequenceClassification(config=UpperCAmelCase ) __snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : Tuple = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: '''simple docstring''' __snake_case : Dict = self.num_choices __snake_case : Union[str, Any] = TFMobileBertForMultipleChoice(config=UpperCAmelCase ) __snake_case : str = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : str = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : Any = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : Any = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __snake_case : Any = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: '''simple docstring''' __snake_case : Dict = self.num_labels __snake_case : List[str] = TFMobileBertForTokenClassification(config=UpperCAmelCase ) __snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : Dict = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : str = TFMobileBertForQuestionAnswering(config=UpperCAmelCase ) __snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : Tuple = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : Optional[int] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : str = config_and_inputs __snake_case : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : Dict = TFMobileBertModelTest.TFMobileBertModelTester(self ) __snake_case : Union[str, Any] = 
ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase ) @slow def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: __snake_case : Optional[Any] = TFMobileBertModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_tf class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : List[str] = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" ) __snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) __snake_case : Tuple = model(UpperCAmelCase )[0] __snake_case : List[Any] = [1, 6, 30522] self.assertEqual(output.shape , UpperCAmelCase ) __snake_case : List[str] = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = torch.device('''cpu''') def lowerCAmelCase__( ) -> Any: __snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]: __snake_case : List[Any] = dct.pop(lowercase ) __snake_case : List[Any] = val def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple: __snake_case : Optional[Any] = [] for k in state_dict.keys(): __snake_case : Union[str, Any] = k if ".pwconv" in k: __snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: __snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: __snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: __snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: __snake_case : int = k_new.split("." ) if ls[2].isdigit(): __snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: __snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]: __snake_case : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __snake_case : Tuple = 1000 __snake_case : Any = "huggingface/label-files" __snake_case : int = "imagenet-1k-id2label.json" __snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) __snake_case : str = {int(lowercase ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __snake_case : Optional[Any] = [3, 3, 6, 4] __snake_case : Optional[int] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __snake_case : List[str] = [3, 3, 9, 6] __snake_case : Optional[Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __snake_case : Optional[int] = [4, 3, 10, 5] __snake_case : Dict = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __snake_case : str = [4, 4, 12, 6] __snake_case : Optional[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): __snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase ) else: __snake_case : Tuple = torch.load(lowercase , map_location="cpu" ) __snake_case : Optional[int] = checkpoint __snake_case : Any = create_rename_keys(lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) # load HuggingFace model __snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval() hf_model.load_state_dict(lowercase ) # prepare test inputs __snake_case : Optional[Any] = prepare_img() __snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" ) __snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" ) # compare outputs from both models __snake_case : str = get_expected_output(lowercase ) __snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _UpperCamelCase = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # Deduplicated union keeps list/tuple inputs consistent with the set case
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
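# Worked example with the demo inputs above: set_a = {a, b, c, d, e} and
# set_b = {c, d, e, f, h, i} share 3 elements out of a union of 8, so
# jaccard_similarity(set_a, set_b) == 3 / 8 == 0.375. With
# alternative_union=True the denominator is len(set_a) + len(set_b) = 11,
# giving 3 / 11 instead.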
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) _UpperCamelCase = logging.getLogger(__name__) def lowerCAmelCase__( lowercase : str ) -> List[str]: __snake_case : int = git.Repo(search_parent_directories=lowercase ) __snake_case : Union[str, Any] = { "repo_id": str(lowercase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f: json.dump(lowercase , lowercase , indent=4 ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]: if params.n_gpu <= 0: __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = -1 __snake_case : Union[str, Any] = True __snake_case : Tuple = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : int = int(os.environ["N_GPU_NODE"] ) __snake_case : Union[str, Any] = int(os.environ["RANK"] ) # number of nodes / node ID __snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node __snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node __snake_case : Union[str, Any] = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __snake_case : Any = 1 __snake_case : str = 0 __snake_case : Optional[Any] = 0 __snake_case : Dict = 0 __snake_case : int = 1 __snake_case : Optional[Any] = 1 __snake_case : Tuple = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0 __snake_case : List[Any] = params.n_nodes > 1 # summary __snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
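# Environment contract for the multi-GPU branch above, read straight from its
# os.environ lookups (the example values are illustrative only):
#   WORLD_SIZE   total number of processes,   e.g. 2
#   N_GPU_NODE   GPUs per node,               e.g. 2
#   RANK         global rank of this process, e.g. 0
#   N_NODES      number of nodes,             e.g. 1
#   NODE_RANK    rank of this node,           e.g. 0
# params.local_rank must also be set per process, typically by a distributed
# launcher such as torch.distributed.launch, before the helper is called.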
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) def lowerCAmelCase__( lowercase : int ) -> List[List[ImageInput]]: if isinstance(lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Dict =["pixel_values"] def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**UpperCAmelCase ) __snake_case : int = size if size is not None else {"shortest_edge": 224} __snake_case : List[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) __snake_case : Optional[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224} __snake_case : str = get_size_dict(UpperCAmelCase , param_name="crop_size" ) __snake_case : str = do_resize __snake_case : str = size __snake_case : Union[str, Any] = do_center_crop __snake_case : Any = crop_size __snake_case : Any = resample __snake_case : Dict = do_rescale __snake_case : Optional[Any] = rescale_factor __snake_case : Optional[Any] = do_normalize __snake_case : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __snake_case : Union[str, Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "shortest_edge" in size: __snake_case : int = get_resize_output_image_size(UpperCAmelCase , size["shortest_edge"] , default_to_square=UpperCAmelCase ) elif "height" in size and "width" in size: __snake_case : Any = (size["height"], size["width"]) else: raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __snake_case : int = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(UpperCAmelCase , size=(size["height"], size["width"]) , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> Tuple: '''simple docstring''' return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: '''simple docstring''' if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. __snake_case : Union[str, Any] = to_numpy_array(UpperCAmelCase ) if do_resize: __snake_case : List[Any] = self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) if do_center_crop: __snake_case : str = self.center_crop(UpperCAmelCase , size=UpperCAmelCase ) if do_rescale: __snake_case : str = self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) if do_normalize: __snake_case : Dict = self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) __snake_case : Any = to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) return image def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __snake_case : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __snake_case : str = resample if resample is not None else self.resample __snake_case : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : List[Any] = image_mean if image_mean is not None else self.image_mean __snake_case : Union[str, Any] = image_std if image_std is not None else self.image_std __snake_case : Tuple = size if size is not None else self.size __snake_case : Optional[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) __snake_case : Optional[int] = crop_size if crop_size is not None else 
self.crop_size __snake_case : Dict = get_size_dict(UpperCAmelCase , param_name="crop_size" ) if not valid_images(UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) __snake_case : List[str] = make_batched(UpperCAmelCase ) __snake_case : Optional[int] = [ [ self._preprocess_image( image=UpperCAmelCase , do_resize=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , do_center_crop=UpperCAmelCase , crop_size=UpperCAmelCase , do_rescale=UpperCAmelCase , rescale_factor=UpperCAmelCase , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , data_format=UpperCAmelCase , ) for img in video ] for video in videos ] __snake_case : List[str] = {"pixel_values": videos} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
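# Hedged usage sketch for the video image processor above (the synthetic frames
# and the direct instantiation are assumptions, not taken from this file):
#     import numpy as np
#     processor = <the image-processor class above>()
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     batch = processor(video, return_tensors="np")
#     batch["pixel_values"].shape  # expected: (1, 8, 3, 224, 224) with the defaults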
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : str =JukeboxTokenizer UpperCAmelCase_ : Tuple ={ "artist": "Zac Brown Band", "genres": "Country", "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ", } @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) __snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : Optional[Any] = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 
33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) __snake_case : Tuple = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : int = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 
77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
_UpperCamelCase = frozenset(["prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs"])
_UpperCamelCase = frozenset(["prompt", "negative_prompt"])
_UpperCamelCase = frozenset([])
_UpperCamelCase = frozenset(["image"])
_UpperCamelCase = frozenset(["image", "height", "width", "guidance_scale"])
_UpperCamelCase = frozenset(["image"])
_UpperCamelCase = frozenset(["prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds"])
_UpperCamelCase = frozenset(["prompt", "image", "negative_prompt"])
# Text guided image variation with an image mask
_UpperCamelCase = frozenset(["prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds"])
_UpperCamelCase = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
# image variation with an image mask
_UpperCamelCase = frozenset(["image", "mask_image", "height", "width", "guidance_scale"])
_UpperCamelCase = frozenset(["image", "mask_image"])
_UpperCamelCase = frozenset(["example_image", "image", "mask_image", "height", "width", "guidance_scale"])
_UpperCamelCase = frozenset(["example_image", "image", "mask_image"])
_UpperCamelCase = frozenset(["class_labels"])
_UpperCamelCase = frozenset(["class_labels"])
_UpperCamelCase = frozenset(["batch_size"])
_UpperCamelCase = frozenset([])
_UpperCamelCase = frozenset(["batch_size"])
_UpperCamelCase = frozenset([])
_UpperCamelCase = frozenset(["prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs"])
_UpperCamelCase = frozenset(["prompt", "negative_prompt"])
_UpperCamelCase = frozenset(["input_tokens"])
_UpperCamelCase = frozenset(["input_tokens"])
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
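# A minimal usage sketch (assumes optuna is installed; `my_trainer` stands in
# for an already-constructed Trainer instance and is not part of this file):
#
#     >>> backend_name = default_hp_search_backend()              # e.g. "optuna"
#     >>> backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#     >>> backend.ensure_available()
#     >>> best_run = backend.run(my_trainer, n_trials=10, direction="minimize")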
# style_context_codestyle: 326
# label: 1
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either condition is true, the function is being asked to calculate
    # the factorial of a negative number, which is not possible.
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
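# Quick sanity check of the formula C(n, k) = n! / (k! * (n - k)!):
# C(5, 2) = 120 / (2 * 6) = 10, and C(n, k) == C(n, n - k) by symmetry.
#
#     >>> combinations(5, 2)
#     10
#     >>> combinations(5, 3)
#     10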
# code_codestyle: 326
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
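# A quick, non-interactive usage sketch (`sort` is the introsort entry point
# defined above):
#
#     >>> sort([4.0, 1.0, -3.0, 10.0, 2.0])
#     [-3.0, 1.0, 2.0, 4.0, 10.0]
#
# Design note: introsort is a hybrid. It runs quicksort with a recursion-depth
# cap of 2 * ceil(log2(n)); past that cap it falls back to heapsort (O(n log n)
# worst case), and sub-ranges below the size threshold of 16 are finished with
# insertion sort.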
# style_context_codestyle: 326
# label: 1
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
# code_codestyle: 326
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
# style_context_codestyle: 326
# label: 1
from __future__ import annotations


def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# code_codestyle: 326
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
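# The Metropolis-style acceptance rule above takes a worsening move with
# probability e^(change / current_temp). A small standalone check of how this
# cools (the values are purely illustrative):
#
#     >>> import math
#     >>> for temp in (100, 10, 1):
#     ...     print(temp, round(math.e ** (-5 / temp), 4))  # accepting a move that is 5 worse
#     100 0.9512
#     10 0.6065
#     1 0.0067
#
# At T=100 the bad move is accepted ~95% of the time, at T=1 almost never,
# which is what lets the search explore early and settle late.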
# style_context_codestyle: 326
# label: 1
def binary_count_setbits(a: int) -> int:
    # Type check first, so a non-integer input gets a TypeError rather than
    # failing the sign check below.
    if not isinstance(a, int):
        raise TypeError("Input value must be an 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
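# For comparison, on Python 3.10+ the same count is available natively as
# int.bit_count (a sanity check, assuming CPython >= 3.10):
#
#     >>> binary_count_setbits(25) == (25).bit_count() == 3  # 25 = 0b11001
#     True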
# code_codestyle: 326
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"] UpperCAmelCase_ : Tuple ="FlavaImageProcessor" UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast") def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) __snake_case : List[Any] = kwargs.pop("feature_extractor" ) __snake_case : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = self.image_processor def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." 
) if text is not None: __snake_case : Union[str, Any] = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if images is not None: __snake_case : Union[str, Any] = self.image_processor( UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if text is not None and images is not None: encoding.update(UpperCAmelCase ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[Any] = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
326
1
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 _UpperCamelCase = { '''return_dict''': False, '''output_hidden_states''': True, '''output_attentions''': True, '''torchscript''': True, '''torch_dtype''': '''float16''', '''use_bfloat16''': True, '''tf_legacy_loss''': True, '''pruned_heads''': {'''a''': 1}, '''tie_word_embeddings''': False, '''is_decoder''': True, '''cross_attention_hidden_size''': 128, '''add_cross_attention''': True, '''tie_encoder_decoder''': True, '''max_length''': 50, '''min_length''': 3, '''do_sample''': True, '''early_stopping''': True, '''num_beams''': 3, '''num_beam_groups''': 3, '''diversity_penalty''': 0.5, '''temperature''': 2.0, '''top_k''': 10, '''top_p''': 0.7, '''typical_p''': 0.2, '''repetition_penalty''': 0.8, '''length_penalty''': 0.8, '''no_repeat_ngram_size''': 5, '''encoder_no_repeat_ngram_size''': 5, '''bad_words_ids''': [1, 2, 3], '''num_return_sequences''': 3, '''chunk_size_feed_forward''': 5, '''output_scores''': True, '''return_dict_in_generate''': True, '''forced_bos_token_id''': 2, '''forced_eos_token_id''': 3, '''remove_invalid_values''': True, '''architectures''': ['''BertModel'''], '''finetuning_task''': '''translation''', '''id2label''': {0: '''label'''}, '''label2id''': {'''label''': '''0'''}, '''tokenizer_class''': '''BertTokenizerFast''', '''prefix''': '''prefix''', '''bos_token_id''': 6, '''pad_token_id''': 7, '''eos_token_id''': 8, '''sep_token_id''': 9, '''decoder_start_token_id''': 10, '''exponential_decay_length_penalty''': (5, 1.01), '''suppress_tokens''': [0, 1], '''begin_suppress_tokens''': 2, '''task_specific_params''': {'''translation''': '''some_params'''}, '''problem_type''': '''regression''', } @is_staging_test class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @classmethod def UpperCAmelCase ( cls ) -> Optional[int]: '''simple docstring''' __snake_case : str = TOKEN HfFolder.save_token(UpperCAmelCase ) @classmethod def UpperCAmelCase ( cls ) -> List[str]: '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-config-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-config" ) except HTTPError: pass def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("test-config" , use_auth_token=self._token ) __snake_case : Union[str, Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase , repo_id="test-config" , push_to_hub=UpperCAmelCase , use_auth_token=self._token ) 
__snake_case : Tuple = BertConfig.from_pretrained(F"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token ) __snake_case : Optional[int] = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase , repo_id="valid_org/test-config-org" , push_to_hub=UpperCAmelCase , use_auth_token=self._token ) __snake_case : Any = BertConfig.from_pretrained("valid_org/test-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' CustomConfig.register_for_auto_class() __snake_case : List[str] = CustomConfig(attribute=42 ) config.push_to_hub("test-dynamic-config" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} ) __snake_case : int = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=UpperCAmelCase ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , "CustomConfig" ) self.assertEqual(new_config.attribute , 42 ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __snake_case : Any = c.n_embd + 1 # int __snake_case : Dict = c.resid_pdrop + 1.0 # float __snake_case : Optional[int] = not c.scale_attn_weights # bool __snake_case : Union[str, Any] = c.summary_type + "foo" # str c.update_from_string( F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" ) self.assertEqual(UpperCAmelCase , c.n_embd , "mismatch for key: n_embd" ) self.assertEqual(UpperCAmelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" ) self.assertEqual(UpperCAmelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" ) self.assertEqual(UpperCAmelCase , c.summary_type , "mismatch for key: summary_type" ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[Any] = PretrainedConfig() __snake_case : List[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( UpperCAmelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] ) __snake_case : Tuple = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )] if len(UpperCAmelCase ) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" F""" {", ".join(UpperCAmelCase )}.""" ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' with self.assertRaises(UpperCAmelCase ): # config is in subfolder, the following should not work without specifying the subfolder __snake_case : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" ) __snake_case : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[str] = mock.Mock() __snake_case : List[Any] = 500 __snake_case : int = {} __snake_case : Dict = HTTPError __snake_case : str = {} # Download this model to make sure it's in the cache. __snake_case : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=UpperCAmelCase ) as mock_head: __snake_case : Tuple = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[Any] = BertConfig.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : int = AutoConfig.from_pretrained("bert-base-cased" ) __snake_case : str = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(UpperCAmelCase ) __snake_case : List[str] = 2 json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , "config.4.0.0.json" ) , "w" ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __snake_case : List[str] = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 __snake_case : Dict = ["config.42.0.0.json"] __snake_case : str = 768 configuration.save_pretrained(UpperCAmelCase ) shutil.move(os.path.join(UpperCAmelCase , "config.4.0.0.json" ) , os.path.join(UpperCAmelCase , "config.42.0.0.json" ) ) __snake_case : List[str] = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 768 ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Union[str, Any] = "hf-internal-testing/test-two-configs" import transformers as new_transformers __snake_case : Union[str, Any] = "v4.0.0" __snake_case , __snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained( UpperCAmelCase , return_unused_kwargs=UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. 
self.assertDictEqual(UpperCAmelCase , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __snake_case : str = "v3.0.0" __snake_case : Dict = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase ) self.assertEqual(old_configuration.hidden_size , 768 )
# code_codestyle: 326
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _UpperCamelCase = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', } } _UpperCamelCase = { '''camembert-base''': 512, } _UpperCamelCase = '''▁''' class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =VOCAB_FILES_NAMES UpperCAmelCase_ : str =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : str =["input_ids", "attention_mask"] def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: '''simple docstring''' __snake_case : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token __snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) __snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase ) ) __snake_case : Dict = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __snake_case : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __snake_case : Optional[int] = len(self.fairseq_tokens_to_ids ) __snake_case : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __snake_case : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Dict = [self.cls_token_id] __snake_case : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1] def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return 
len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(UpperCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = [] __snake_case : Union[str, Any] = "" __snake_case : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase ) + token __snake_case : List[Any] = True __snake_case : Union[str, Any] = [] else: current_sub_tokens.append(UpperCAmelCase ) __snake_case : int = False out_string += self.sp_model.decode(UpperCAmelCase ) return out_string.strip() def __getstate__( self ) -> List[Any]: '''simple docstring''' __snake_case : str = self.__dict__.copy() __snake_case : Optional[Any] = None return state def __setstate__( self , UpperCAmelCase ) -> str: '''simple docstring''' __snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case : List[str] = {} __snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __snake_case : Optional[Any] = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , "wb" ) as fi: __snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,)
# style_context_codestyle: 326
# label: 1
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    # Liouville function: -1 if `number` has an odd count of prime factors
    # (with multiplicity), +1 otherwise.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# code_codestyle: 326
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)

    # subset[i][j] is True when some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
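# Example run of the DP above: with arr = [3, 34, 4, 12, 5, 2] a sum of 9 is
# reachable (4 + 5), while 30 is not (the largest sum without 34 is 26, and
# any subset containing 34 already exceeds 30):
#
#     >>> is_sum_subset([3, 34, 4, 12, 5, 2], 9)
#     True
#     >>> is_sum_subset([3, 34, 4, 12, 5, 2], 30)
#     False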
# style_context_codestyle: 326
# label: 1
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
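# Worked example (note the algorithm assumes a fully parenthesized infix
# expression with single-digit operands, since it reads one character at a
# time):
#
#     >>> dijkstras_two_stack_algorithm("((9 - 3) / (2 + 1))")
#     2.0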
# code_codestyle: 326
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
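# Launch sketch (the file name is whatever this script is saved as; torchrun
# provides the RANK and WORLD_SIZE environment variables the script reads):
#
#     torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True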
# style_context_codestyle: 326
# label: 1
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
# code_codestyle: 326
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                # x, y, z must be positive integers: z > 0 requires a > d,
                # and n > 0 requires a < 4d
                if first_term > common_difference and first_term < 4 * common_difference:
                    frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
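# Why the check works: write the arithmetic progression as x = a + d, y = a,
# z = a - d. Then
#     x^2 - y^2 - z^2 = (a + d)^2 - a^2 - (a - d)^2 = a * (4d - a) = n,
# so for a given first term a, the valid n are exactly the multiples of a for
# which d = (a + n/a) / 4 is an integer, subject to a > d (z > 0) and a < 4d
# (n > 0) -- precisely what the inner loop tests before counting n.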
# style_context_codestyle: 326
# label: 1
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
# code_codestyle: 326
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
# style_context_codestyle: 326
# label: 1
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # fit_generator was deprecated and removed in recent TensorFlow releases;
    # fit accepts the same generators directly.
    classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
# code_codestyle: 326
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=56 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = parent __snake_case : Tuple = batch_size __snake_case : List[str] = seq_length __snake_case : Optional[int] = is_training __snake_case : int = use_attention_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : Any = use_labels __snake_case : List[str] = vocab_size __snake_case : int = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : List[Any] = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : Dict = initializer_range __snake_case : List[Any] = num_choices __snake_case : Union[str, Any] = rescale_embeddings __snake_case : List[Any] = attention_type __snake_case : str = use_bias __snake_case : Dict = block_size __snake_case : Optional[Any] = num_random_blocks def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_attention_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Union[str, Any] = None if self.use_token_type_ids: __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase ( self ) -> Any: '''simple 
docstring''' __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs __snake_case : int = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class _lowerCamelCase ( a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) UpperCAmelCase_ : Dict =False UpperCAmelCase_ : str =False def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Dict = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Any: '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_hidden_states_output() @slow def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __snake_case : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = model_class(UpperCAmelCase ) @jax.jit def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) with self.subTest("JIT Enabled" ): __snake_case : int = model_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __snake_case : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> int: '''simple docstring''' if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# style_context_codestyle: 326
# label: 1
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} _UpperCamelCase = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } _UpperCamelCase = { '''facebook/esm2_t6_8M_UR50D''': 1024, '''facebook/esm2_t12_35M_UR50D''': 1024, } def lowerCAmelCase__( lowercase : int ) -> Dict: with open(lowercase , "r" ) as f: __snake_case : Union[str, Any] = f.read().splitlines() return [l.strip() for l in lines] class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Tuple =VOCAB_FILES_NAMES UpperCAmelCase_ : str =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : Optional[Any] =["input_ids", "attention_mask"] def __init__( self , UpperCAmelCase , UpperCAmelCase="<unk>" , UpperCAmelCase="<cls>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="<eos>" , **UpperCAmelCase , ) -> int: '''simple docstring''' super().__init__(**UpperCAmelCase ) __snake_case : List[str] = load_vocab_file(UpperCAmelCase ) __snake_case : Tuple = dict(enumerate(self.all_tokens ) ) __snake_case : Dict = {tok: ind for ind, tok in enumerate(self.all_tokens )} __snake_case : List[Any] = unk_token __snake_case : Dict = cls_token __snake_case : Tuple = pad_token __snake_case : int = mask_token __snake_case : List[Any] = eos_token __snake_case : List[Any] = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' return self._id_to_token.get(UpperCAmelCase , self.unk_token ) def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) ) def UpperCAmelCase ( self , UpperCAmelCase , **UpperCAmelCase ) -> Dict: '''simple docstring''' return text.split() def UpperCAmelCase ( self , UpperCAmelCase=False ) -> str: '''simple docstring''' return len(self._id_to_token ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return {token: i for i, token in enumerate(self.all_tokens )} def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' return self._id_to_token.get(UpperCAmelCase , self.unk_token ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __snake_case : Union[str, Any] = [self.cls_token_id] __snake_case : Any = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" 
) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] __snake_case : Union[str, Any] = [1] + ([0] * len(UpperCAmelCase )) + [1] if token_ids_a is not None: mask += [0] * len(UpperCAmelCase ) + [1] return mask def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" ) with open(UpperCAmelCase , "w" ) as f: f.write("\n".join(self.all_tokens ) ) return (vocab_file,) @property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return self.get_vocab_size(with_added_tokens=UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int: '''simple docstring''' return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
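# --- Usage sketch (added): exercising the vocab-file tokenizer above. ---
# A minimal, hedged example: it assumes the class above ships in
# `transformers` as `EsmTokenizer` (the upstream name for this tokenizer)
# and that the `facebook/esm2_t6_8M_UR50D` checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP is reachable.
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
# Registering every vocab token as no-split makes the trie cut protein
# strings into per-residue tokens; <cls>/<eos> come from
# build_inputs_with_special_tokens above.
ids = tokenizer("MKTAYIAKQR")["input_ids"]
print(len(ids))  # 10 residues + 2 special tokens = 12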
326
import argparse
import datetime


def lowerCAmelCase__( lowercase : str ) -> str:
    __snake_case : int = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    __snake_case : int = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(lowercase ) < 11:
        raise ValueError("Must be 10 characters long" )

    # Get month
    __snake_case : int = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12" )

    __snake_case : str = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )

    # Get day
    __snake_case : int = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31" )

    # Get second separator
    __snake_case : str = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'" )

    # Get year
    __snake_case : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?" )

    # Get datetime obj for validation
    __snake_case : str = datetime.date(int(lowercase ) , int(lowercase ) , int(lowercase ) )

    # Start math
    if m <= 2:
        __snake_case : Optional[Any] = y - 1
        __snake_case : Tuple = m + 12
    # maths var
    __snake_case : int = int(str(lowercase )[:2] )
    __snake_case : int = int(str(lowercase )[2:] )
    __snake_case : int = int(2.6 * m - 5.3_9 )
    __snake_case : int = int(c / 4 )
    __snake_case : int = int(k / 4 )
    __snake_case : int = int(d + k )
    __snake_case : int = int(t + u + v + x )
    __snake_case : int = int(z - (2 * c) )
    __snake_case : int = round(w % 7 )
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer." )

    # Response
    __snake_case : str = f"""Your date {date_input}, is a {days[str(lowercase )]}!"""
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _UpperCamelCase = argparse.ArgumentParser(
        description=(
            '''Find out what day of the week nearly any date is or was. Enter '''
            '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
        )
    )
    parser.add_argument(
        '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
    )
    _UpperCamelCase = parser.parse_args()
    zeller(args.date_input)
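# --- Worked example (added): a self-contained Zeller's-congruence sketch. ---
# Hedged illustration: the masked assignments above hide the intermediate
# names, so this is an independent restatement of the same day-of-week
# arithmetic, cross-checked against `datetime`; it is not the module's API.
import datetime


def zeller_weekday(year: int, month: int, day: int) -> int:
    """Zeller's h: 0=Saturday, 1=Sunday, ..., 6=Friday."""
    if month <= 2:  # January/February count as months 13/14 of the prior year
        month += 12
        year -= 1
    k, j = year % 100, year // 100
    return (day + 13 * (month + 1) // 5 + k + k // 4 + j // 4 + 5 * j) % 7


assert zeller_weekday(2000, 1, 1) == 0  # 2000-01-01 was a Saturday
# Map h to Python's Monday=0 convention before comparing with datetime.
assert (zeller_weekday(2023, 6, 15) + 5) % 7 == datetime.date(2023, 6, 15).weekday()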
326
1
from math import log

from scipy.constants import Boltzmann, physical_constants

_UpperCamelCase = 300  # TEMPERATURE (unit = K)


def lowerCAmelCase__( lowercase : float , lowercase : float , lowercase : float , ) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive" )
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive" )
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration" )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
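# --- Worked example (added): a quick numeric check of the formula above. ---
# Hedged sketch: it recomputes V_bi = (kT/q) * ln(Nd * Na / ni^2) directly,
# with silicon-like concentrations (cm^-3) chosen purely for illustration.
from math import log
from scipy.constants import Boltzmann, physical_constants

kT_eV = Boltzmann * 300 / physical_constants["electron volt"][0]  # ~0.0259 eV at 300 K
v_bi = kT_eV * log((1e17 * 1e17) / (1e10 ** 2))
print(f"built-in potential ~ {v_bi:.2f} V")  # ~0.83 V, typical for a Si p-n junction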
326
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : str , lowercase : List[Any] , lowercase : List[str] ) -> int:
    if index == r:
        for j in range(lowercase ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    __snake_case : Union[str, Any] = arr[i]
    combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase__( lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] ) -> Optional[Any]:
    # A temporary array to store all combination one by one
    __snake_case : Tuple = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )


if __name__ == "__main__":
    # Driver code to check the function above
    _UpperCamelCase = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
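# --- Usage note (added): the standard-library equivalent of the recursion. ---
# Hedged cross-check: `itertools.combinations` yields the same r-sized
# subsets, in the same lexicographic order as the include/exclude recursion
# above, so it makes a convenient oracle when testing.
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)  # 10 20 30, then 10 20 40, ... matching the driver above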
326
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
326
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] _UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def lowerCAmelCase__( lowercase : str ) -> Optional[Any]: __snake_case : Optional[int] = torch.load(lowercase , map_location="cpu" ) return sd def lowerCAmelCase__( lowercase : List[Any] , lowercase : List[Any] , lowercase : List[Any]=rename_keys_prefix ) -> Dict: __snake_case : Tuple = OrderedDict() __snake_case : str = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __snake_case : Optional[Any] = key for name_pair in rename_keys_prefix: __snake_case : List[str] = new_key.replace(name_pair[0] , name_pair[1] ) __snake_case : List[str] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __snake_case : List[Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : Any ) -> List[Any]: assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: __snake_case : Any = "pretraining" if "vcr" in checkpoint_path: __snake_case : Optional[Any] = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: __snake_case : Tuple = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: __snake_case : Dict = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: __snake_case : Any = {"visual_embedding_dim": 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: __snake_case : Dict = {"visual_embedding_dim": 512} __snake_case : Any = "multichoice" elif "vqa_advanced" in checkpoint_path: __snake_case : List[Any] = {"visual_embedding_dim": 2048} __snake_case : Optional[Any] = "vqa_advanced" elif "vqa" in checkpoint_path: __snake_case : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129} __snake_case : Union[str, Any] = "vqa" elif "nlvr" in checkpoint_path: __snake_case : Tuple = { "visual_embedding_dim": 1024, "num_labels": 2, } __snake_case : List[Any] = "nlvr" __snake_case : Union[str, Any] = VisualBertConfig(**lowercase ) # Load State Dict __snake_case : Any = load_state_dict(lowercase ) __snake_case : Dict = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": __snake_case : Optional[Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": __snake_case : Tuple = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": 
__snake_case : Tuple = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": __snake_case : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') _UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
326
1
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=56 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = parent __snake_case : Tuple = batch_size __snake_case : List[str] = seq_length __snake_case : Optional[int] = is_training __snake_case : int = use_attention_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : Any = use_labels __snake_case : List[str] = vocab_size __snake_case : int = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : List[Any] = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : Dict = initializer_range __snake_case : List[Any] = num_choices __snake_case : Union[str, Any] = rescale_embeddings __snake_case : List[Any] = attention_type __snake_case : str = use_bias __snake_case : Dict = block_size __snake_case : Optional[Any] = num_random_blocks def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_attention_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Union[str, Any] = None if self.use_token_type_ids: __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase ( self ) -> Any: '''simple 
docstring''' __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs __snake_case : int = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class _lowerCamelCase ( a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) UpperCAmelCase_ : Dict =False UpperCAmelCase_ : str =False def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Dict = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Any: '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_hidden_states_output() @slow def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __snake_case : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = model_class(UpperCAmelCase ) @jax.jit def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) with self.subTest("JIT Enabled" ): __snake_case : int = model_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __snake_case : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> int: '''simple docstring''' if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
326
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple: # Load configuration defined in the metadata file with open(lowercase ) as metadata_file: __snake_case : int = json.load(lowercase ) __snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] ) # Load in the weights from the checkpoint_path __snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"] # Load the entity vocab file __snake_case : Tuple = load_original_entity_vocab(lowercase ) # add an entry for [MASK2] __snake_case : Optional[int] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 __snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks __snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase ) __snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(lowercase ) with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f: __snake_case : Tuple = json.load(lowercase ) __snake_case : List[Any] = "MLukeTokenizer" with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(lowercase , lowercase ) __snake_case : Any = MLukeTokenizer.from_pretrained(lowercase ) # Initialize the embeddings of the special tokens __snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0] __snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0] __snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"] __snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: __snake_case : List[Any] = state_dict[bias_name] __snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 ) __snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 ) __snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self.""" __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] __snake_case : str = state_dict[prefix + matrix_name] __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"] __snake_case : List[str] = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Any = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' __snake_case : List[Any] = state_dict["entity_predictions.bias"] __snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) __snake_case : Any = LukeForMaskedLM(config=lowercase ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) __snake_case : int = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): __snake_case : str = state_dict[key] else: __snake_case : str = state_dict[key] __snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase ) if set(lowercase ) != {"luke.embeddings.position_ids"}: raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(lowercase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs __snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" ) __snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." __snake_case : Union[str, Any] = (0, 9) __snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : Any = model(**lowercase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : Optional[Any] = torch.Size((1, 33, 768) ) __snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : str = torch.Size((1, 1, 768) ) __snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction __snake_case : str = MLukeTokenizer.from_pretrained(lowercase ) __snake_case : Dict = "Tokyo is the capital of <mask>." 
__snake_case : Union[str, Any] = (24, 30) __snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : int = model(**lowercase ) __snake_case : Dict = encoding["input_ids"][0].tolist() __snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) __snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase ) __snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item() __snake_case : Optional[int] = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase ) ) model.save_pretrained(lowercase ) def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]: __snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"] __snake_case : Any = [json.loads(lowercase ) for line in open(lowercase )] __snake_case : Any = {} for entry in data: __snake_case : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: __snake_case : Optional[int] = entity_id break __snake_case : Union[str, Any] = f"""{language}:{entity_name}""" __snake_case : Any = entity_id return new_mapping if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) _UpperCamelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
326
1
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCamelCase = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
_UpperCamelCase = spec.loader.load_module()

_UpperCamelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCamelCase = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

_UpperCamelCase = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}


def lowerCAmelCase__( ) -> Optional[Any]:
    __snake_case : List[Any] = []

    for config_class in list(CONFIG_MAPPING.values() ):
        __snake_case : List[str] = False

        # source code of `config_class`
        __snake_case : Any = inspect.getsource(lowercase )
        __snake_case : Any = _re_checkpoint.findall(lowercase )

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            __snake_case , __snake_case : Any = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            __snake_case : List[str] = f"""https://huggingface.co/{ckpt_name}"""
            if ckpt_link == ckpt_link_from_name:
                __snake_case : Optional[int] = True
                break

        __snake_case : str = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(lowercase )

    if len(lowercase ) > 0:
        __snake_case : int = "\n".join(sorted(lowercase ) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
326
from maths.prime_factors import prime_factors


def lowerCAmelCase__( lowercase : int ) -> int:
    if not isinstance(lowercase , lowercase ):
        __snake_case : Optional[int] = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(lowercase )
    if number < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(lowercase ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
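# --- Worked example (added): a dependency-free Moebius-style check. ---
# Hedged sketch: the helper above leans on a local `maths.prime_factors`
# module; this trial-division version computes the textbook mu(n) (0 for a
# squared prime factor, otherwise (-1)**(count of prime factors)) without
# that import, which is handy where the local module is unavailable.
def mobius(n: int) -> int:
    if n < 1:
        raise ValueError("Input must be a positive integer")
    factors = 0
    p = 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor -> mu(n) = 0
                return 0
            factors += 1
        else:
            p += 1
    if n > 1:  # leftover prime larger than sqrt of the original n
        factors += 1
    return -1 if factors % 2 else 1


assert [mobius(n) for n in (1, 2, 4, 6, 30)] == [1, -1, 0, 1, -1]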
326
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class _lowerCamelCase ( a ):
    """simple docstring"""

    UpperCAmelCase_ : Tuple ="cvt"

    def __init__( self , UpperCAmelCase=3 , UpperCAmelCase=[7, 3, 3] , UpperCAmelCase=[4, 2, 2] , UpperCAmelCase=[2, 1, 1] , UpperCAmelCase=[64, 192, 384] , UpperCAmelCase=[1, 3, 6] , UpperCAmelCase=[1, 2, 10] , UpperCAmelCase=[4.0, 4.0, 4.0] , UpperCAmelCase=[0.0, 0.0, 0.0] , UpperCAmelCase=[0.0, 0.0, 0.0] , UpperCAmelCase=[0.0, 0.0, 0.1] , UpperCAmelCase=[True, True, True] , UpperCAmelCase=[False, False, True] , UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , UpperCAmelCase=[3, 3, 3] , UpperCAmelCase=[1, 1, 1] , UpperCAmelCase=[2, 2, 2] , UpperCAmelCase=[1, 1, 1] , UpperCAmelCase=[1, 1, 1] , UpperCAmelCase=0.02 , UpperCAmelCase=1E-12 , **UpperCAmelCase , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(**UpperCAmelCase )
        __snake_case : Optional[int] = num_channels
        __snake_case : Tuple = patch_sizes
        __snake_case : int = patch_stride
        __snake_case : int = patch_padding
        __snake_case : Tuple = embed_dim
        __snake_case : Optional[int] = num_heads
        __snake_case : Tuple = depth
        __snake_case : Optional[int] = mlp_ratio
        __snake_case : List[Any] = attention_drop_rate
        __snake_case : Optional[int] = drop_rate
        __snake_case : Dict = drop_path_rate
        __snake_case : Any = qkv_bias
        __snake_case : List[str] = cls_token
        __snake_case : Union[str, Any] = qkv_projection_method
        __snake_case : Union[str, Any] = kernel_qkv
        __snake_case : Optional[int] = padding_kv
        __snake_case : Optional[int] = stride_kv
        __snake_case : Optional[int] = padding_q
        __snake_case : Tuple = stride_q
        __snake_case : int = initializer_range
        __snake_case : Union[str, Any] = layer_norm_eps
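# --- Usage sketch (added): instantiating the configuration above. ---
# Hedged example: it assumes this class is exposed as `transformers.CvtConfig`
# (the upstream name for the "cvt" model_type); every argument falls back to
# the per-stage defaults declared in __init__.
from transformers import CvtConfig

config = CvtConfig()  # three stages by default
print(config.model_type, config.depth, config.embed_dim)
# -> cvt [1, 2, 10] [64, 192, 384]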
326
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def UpperCAmelCase ( self ) -> List[Any]:
        '''simple docstring'''
        __snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
        __snake_case : str = AutoTokenizer.from_pretrained("google/mt5-small" )
        __snake_case : List[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids
        __snake_case : int = tokenizer("Hi I am" , return_tensors="np" ).input_ids
        __snake_case : Tuple = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
        __snake_case : Tuple = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
        __snake_case : str = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
        __snake_case : Any = -(labels.shape[-1] * loss.item())
        __snake_case : List[str] = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
326
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , ) -> str: '''simple docstring''' __snake_case : int = parent __snake_case : Any = 13 __snake_case : str = 7 __snake_case : Tuple = True __snake_case : Any = True __snake_case : Dict = True __snake_case : List[str] = True __snake_case : Dict = True __snake_case : Optional[Any] = False __snake_case : Tuple = False __snake_case : Union[str, Any] = False __snake_case : Optional[Any] = 2 __snake_case : str = 99 __snake_case : Optional[Any] = 0 __snake_case : str = 32 __snake_case : Tuple = 2 __snake_case : Optional[Any] = 4 __snake_case : Union[str, Any] = 0.1 __snake_case : List[Any] = 0.1 __snake_case : Optional[int] = 512 __snake_case : List[Any] = 16 __snake_case : List[str] = 2 __snake_case : Union[str, Any] = 0.02 __snake_case : str = 3 __snake_case : int = 4 __snake_case : Optional[int] = "last" __snake_case : Any = True __snake_case : Any = None __snake_case : Tuple = 0 def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) __snake_case : int = None if self.use_input_lengths: __snake_case : str = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __snake_case : str = None if self.use_token_type_ids: __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __snake_case : int = None __snake_case : Optional[Any] = None __snake_case : Any = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Any = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : Any = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = TFFlaubertModel(config=UpperCAmelCase ) __snake_case : int = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} __snake_case : Any = model(UpperCAmelCase ) __snake_case : Tuple = [input_ids, input_mask] __snake_case : str = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> int: '''simple docstring''' __snake_case : int = TFFlaubertWithLMHeadModel(UpperCAmelCase ) __snake_case : Optional[Any] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} __snake_case : Optional[int] = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[Any]: '''simple docstring''' __snake_case : str = TFFlaubertForQuestionAnsweringSimple(UpperCAmelCase ) __snake_case : Any = {"input_ids": input_ids, "lengths": input_lengths} __snake_case : Tuple = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[str]: '''simple docstring''' __snake_case : int = TFFlaubertForSequenceClassification(UpperCAmelCase ) __snake_case : Union[str, Any] = {"input_ids": input_ids, "lengths": input_lengths} __snake_case : str = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.num_labels __snake_case : Any = TFFlaubertForTokenClassification(config=UpperCAmelCase ) __snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : int = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[Any]: '''simple docstring''' __snake_case : Optional[int] = self.num_choices __snake_case : Any = TFFlaubertForMultipleChoice(config=UpperCAmelCase ) __snake_case : Any = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : Dict = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : List[Any] = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : Any = { "input_ids": multiple_choice_inputs_ids, "attention_mask": 
multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __snake_case : List[str] = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : int = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = config_and_inputs __snake_case : List[Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class _lowerCamelCase ( a , a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : List[Any] =( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase_ : List[Any] =( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCAmelCase_ : Any =( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase_ : Optional[int] =False UpperCAmelCase_ : Tuple =False def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : List[Any] = TFFlaubertModelTester(self ) __snake_case : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=37 ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*UpperCAmelCase ) @slow def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Union[str, Any] = TFFlaubertModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_tf @require_sentencepiece @require_tokenizers class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) __snake_case : Optional[Any] = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __snake_case : Union[str, Any] = model(UpperCAmelCase )[0] __snake_case : Tuple = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , UpperCAmelCase ) # compare the actual values for a slice. __snake_case : Dict = tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
326
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)  # pylint: disable=invalid-name


class _lowerCamelCase ( a ):
    """simple docstring"""

    def __init__( self , UpperCAmelCase , UpperCAmelCase=768 ) -> List[str]:
        '''simple docstring'''
        super().__init__(UpperCAmelCase )
        __snake_case : Optional[int] = proj_size

        __snake_case : str = CLIPVisionModel(UpperCAmelCase )

        __snake_case : Tuple = PaintByExampleMapper(UpperCAmelCase )
        __snake_case : Union[str, Any] = nn.LayerNorm(config.hidden_size )
        __snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size )

        # uncondition for scaling
        __snake_case : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]:
        '''simple docstring'''
        __snake_case : int = self.model(pixel_values=UpperCAmelCase )
        __snake_case : Optional[int] = clip_output.pooler_output
        __snake_case : Any = self.mapper(latent_states[:, None] )
        __snake_case : Any = self.final_layer_norm(UpperCAmelCase )
        __snake_case : str = self.proj_out(UpperCAmelCase )
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class _lowerCamelCase ( nn.Module ):
    """simple docstring"""

    def __init__( self , UpperCAmelCase ) -> List[Any]:
        '''simple docstring'''
        super().__init__()

        __snake_case : List[Any] = (config.num_hidden_layers + 1) // 5
        __snake_case : Dict = config.hidden_size
        __snake_case : str = 1

        __snake_case : List[Any] = nn.ModuleList(
            [
                BasicTransformerBlock(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , activation_fn="gelu" , attention_bias=UpperCAmelCase )
                for _ in range(UpperCAmelCase )
            ]
        )

    def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
        '''simple docstring'''
        for block in self.blocks:
            __snake_case : int = block(UpperCAmelCase )

        return hidden_states
326
1
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


_UpperCamelCase = 4
_UpperCamelCase = 3


class _lowerCamelCase ( a ):
    pass


def lowerCAmelCase__( lowercase : List[str] ) -> Any:
    for shard in shards:
        for i in range(lowercase ):
            yield {"i": i, "shard": shard}


def lowerCAmelCase__( ) -> Optional[int]:
    __snake_case : List[Any] = int(os.environ["RANK"] )
    __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] )

    __snake_case : List[str] = ArgumentParser()
    parser.add_argument("--streaming" , type=lowercase )
    parser.add_argument("--local_rank" , type=lowercase )
    parser.add_argument("--num_workers" , type=lowercase , default=0 )
    __snake_case : Any = parser.parse_args()
    __snake_case : Dict = args.streaming
    __snake_case : Union[str, Any] = args.num_workers

    __snake_case : Any = {"shards": [f"""shard_{shard_idx}""" for shard_idx in range(lowercase )]}
    __snake_case : Optional[int] = IterableDataset.from_generator(lowercase , gen_kwargs=lowercase )
    if not streaming:
        __snake_case : Any = Dataset.from_list(list(lowercase ) )

    __snake_case : Dict = split_dataset_by_node(lowercase , rank=lowercase , world_size=lowercase )
    __snake_case : Union[str, Any] = torch.utils.data.DataLoader(lowercase , num_workers=lowercase )

    __snake_case : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    __snake_case : List[str] = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )

    __snake_case : Dict = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )


if __name__ == "__main__":
    main()
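# --- Usage sketch (added): the single-process shape of the test above. ---
# Hedged example: it runs the same split outside torch.distributed, using
# only the public `datasets` API already imported at the top of the script.
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"i": list(range(10))})
for rank in range(2):
    shard = split_dataset_by_node(ds, rank=rank, world_size=2)
    print(rank, len(shard))  # each of the two ranks sees 5 of the 10 rows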
326
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging _UpperCamelCase = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase = 101 ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = length def __len__( self ) -> Optional[Any]: '''simple docstring''' return self.length def __getitem__( self , UpperCAmelCase ) -> int: '''simple docstring''' return i class _lowerCamelCase : """simple docstring""" def __call__( self , UpperCAmelCase ) -> Any: '''simple docstring''' return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )} class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self ) -> str: '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. __snake_case : int = nn.Linear(120 , 80 ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class _lowerCamelCase ( a ): """simple docstring""" @require_torch_neuroncore def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = F"""--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : Tuple = F"""--output_dir {output_dir}""".split() __snake_case : Optional[Any] = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _lowerCamelCase ( a ): """simple docstring""" @require_torch_multi_gpu def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : str = F"""--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py """.split() __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : str = F"""--output_dir {output_dir}""".split() __snake_case : Optional[Any] = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py _UpperCamelCase = HfArgumentParser((TrainingArguments,)) _UpperCamelCase = parser.parse_args_into_dataclasses()[0] logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: _UpperCamelCase = DummyDataset(dataset_length) def lowerCAmelCase__( lowercase : EvalPrediction ) -> Dict: __snake_case : str = list(range(len(lowercase ) ) ) __snake_case : Dict = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" ) return {"success": success} _UpperCamelCase = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) _UpperCamelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _UpperCamelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _UpperCamelCase = 2 _UpperCamelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _UpperCamelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _UpperCamelCase = None
326
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = torch.device('''cpu''') def lowerCAmelCase__( ) -> Any: __snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]: __snake_case : List[Any] = dct.pop(lowercase ) __snake_case : List[Any] = val def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple: __snake_case : Optional[Any] = [] for k in state_dict.keys(): __snake_case : Union[str, Any] = k if ".pwconv" in k: __snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: __snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: __snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: __snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: __snake_case : int = k_new.split("." ) if ls[2].isdigit(): __snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: __snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]: __snake_case : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __snake_case : Tuple = 1000 __snake_case : Any = "huggingface/label-files" __snake_case : int = "imagenet-1k-id2label.json" __snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) __snake_case : str = {int(lowercase ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __snake_case : Optional[Any] = [3, 3, 6, 4] __snake_case : Optional[int] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __snake_case : List[str] = [3, 3, 9, 6] __snake_case : Optional[Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __snake_case : Optional[int] = [4, 3, 10, 5] __snake_case : Dict = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __snake_case : str = [4, 4, 12, 6] __snake_case : Optional[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): __snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase ) else: __snake_case : Tuple = torch.load(lowercase , map_location="cpu" ) __snake_case : Optional[int] = checkpoint __snake_case : Any = create_rename_keys(lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) # load HuggingFace model __snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval() hf_model.load_state_dict(lowercase ) # prepare test inputs __snake_case : Optional[Any] = prepare_img() __snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" ) __snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" ) # compare outputs from both models __snake_case : str = get_expected_output(lowercase ) __snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _UpperCamelCase = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
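# Worked example for the two searches above (a self-contained sketch; run after
# the definitions). With precision = 10 and a 30-element sorted list, the first
# iteration cuts at
#   one_third = (0 + 30) // 3 + 1 = 11   and   two_third = 2 * 30 // 3 + 1 = 21,
# keeps the middle third, and then finishes with lin_search once the window is
# narrower than `precision`:
demo = list(range(0, 60, 2))  # [0, 2, 4, ..., 58]
assert ite_ternary_search(demo, 24) == 12
assert rec_ternary_search(0, len(demo) - 1, demo, 24) == 12
assert ite_ternary_search(demo, 25) == -1  # odd numbers are absent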
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) _UpperCamelCase = logging.getLogger(__name__) def lowerCAmelCase__( lowercase : str ) -> List[str]: __snake_case : int = git.Repo(search_parent_directories=lowercase ) __snake_case : Union[str, Any] = { "repo_id": str(lowercase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f: json.dump(lowercase , lowercase , indent=4 ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]: if params.n_gpu <= 0: __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = -1 __snake_case : Union[str, Any] = True __snake_case : Tuple = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : int = int(os.environ["N_GPU_NODE"] ) __snake_case : Union[str, Any] = int(os.environ["RANK"] ) # number of nodes / node ID __snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node __snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node __snake_case : Union[str, Any] = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __snake_case : Any = 1 __snake_case : str = 0 __snake_case : Optional[Any] = 0 __snake_case : Dict = 0 __snake_case : int = 1 __snake_case : Optional[Any] = 1 __snake_case : Tuple = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0 __snake_case : List[Any] = params.n_nodes > 1 # summary __snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
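# Sketch of the environment contract the GPU-initialization helper above relies
# on in its multi-GPU branch (the variable names follow its os.environ lookups;
# the concrete values are illustrative, e.g. 2 nodes x 4 GPUs):
#
#   WORLD_SIZE=8   N_GPU_NODE=4   RANK=5   N_NODES=2   NODE_RANK=1
#
# from which the helper derives the node layout; local_rank itself is expected
# to be supplied by the launcher:
world_size, n_gpu_per_node, global_rank = 8, 4, 5
assert world_size // n_gpu_per_node == 2  # n_nodes
assert global_rank // n_gpu_per_node == 1  # node_id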
# Functions to print the upper and lower halves of a diamond (pyramid).


def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
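# Worked example for the functions above: pretty_print(3) prints the upper
# pyramid from floyd() followed by its mirror image from reverse_floyd()
# (trailing spaces omitted):
#
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *
pretty_print(3)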
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : str =JukeboxTokenizer UpperCAmelCase_ : Tuple ={ "artist": "Zac Brown Band", "genres": "Country", "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ", } @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) __snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : Optional[Any] = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 
33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def UpperCAmelCase ( self ) -> str: '''simple docstring''' import torch __snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) __snake_case : Tuple = tokenizer(**self.metas )["input_ids"] # fmt: off __snake_case : int = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 
77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
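# Minimal usage sketch for the tokenizer exercised by the tests above (the
# checkpoint names come from the tests; the metadata values are illustrative).
# Jukebox conditions on three priors, so one call returns three token tensors:
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
assert len(tokens) == 3  # one sequence of input ids per prior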
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py _UpperCamelCase = '''src/diffusers''' # Matches is_xxx_available() _UpperCamelCase = re.compile(R'''is\_([a-z_]*)_available\(\)''') # Matches from xxx import bla _UpperCamelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') _UpperCamelCase = ''' {0} = None ''' _UpperCamelCase = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) ''' _UpperCamelCase = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' def lowerCAmelCase__( lowercase : Tuple ) -> Union[str, Any]: __snake_case : Tuple = _re_backend.findall(lowercase ) if len(lowercase ) == 0: return None return "_and_".join(lowercase ) def lowerCAmelCase__( ) -> Optional[Any]: with open(os.path.join(lowercase , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f: __snake_case : str = f.readlines() # Get to the point we do the actual imports for type checking __snake_case : Union[str, Any] = 0 __snake_case : str = {} # Go through the end of the file while line_index < len(lowercase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __snake_case : int = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("else:" ): line_index += 1 line_index += 1 __snake_case : List[str] = [] # Until we unindent, add backend objects to the list while line_index < len(lowercase ) and len(lines[line_index] ) > 1: __snake_case : List[str] = lines[line_index] __snake_case : Union[str, Any] = _re_single_line_import.search(lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(lowercase ) > 0: __snake_case : Optional[int] = objects else: line_index += 1 return backend_specific_objects def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : Optional[int] ) -> Tuple: if name.isupper(): return DUMMY_CONSTANT.format(lowercase ) elif name.islower(): return DUMMY_FUNCTION.format(lowercase , lowercase ) else: return DUMMY_CLASS.format(lowercase , lowercase ) def lowerCAmelCase__( lowercase : Dict=None ) -> List[str]: if backend_specific_objects is None: __snake_case : str = read_init() # For special correspondence backend to module name as used in the function requires_modulename __snake_case : Any = {} for backend, objects in backend_specific_objects.items(): __snake_case : List[str] = "[" + ", ".join(f"""\"{b}\"""" for b in backend.split("_and_" ) ) + "]" __snake_case : List[Any] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(lowercase , lowercase ) for o in objects] ) __snake_case : Any = dummy_file return dummy_files def lowerCAmelCase__( lowercase : Any=False ) -> List[str]: __snake_case : Union[str, Any] = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __snake_case : Union[str, Any] = {"torch": "pt"} # Locate actual dummy modules and read their content. 
__snake_case : List[str] = os.path.join(lowercase , "utils" ) __snake_case : List[Any] = { backend: os.path.join(lowercase , f"""dummy_{short_names.get(lowercase , lowercase )}_objects.py""" ) for backend in dummy_files.keys() } __snake_case : List[str] = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(lowercase ): with open(lowercase , "r" , encoding="utf-8" , newline="\n" ) as f: __snake_case : int = f.read() else: __snake_case : Dict = "" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"""Updating diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py as the main """ "__init__ has new objects." ) with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( "The main __init__ has objects that are not present in " f"""diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py. Run `make fix-copies` """ "to fix this." ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _UpperCamelCase = parser.parse_args() check_dummies(args.fix_and_overwrite)
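# Worked expansion of the class template above (DUMMY_CLASS in the original
# utility; in this dump it is one of the rebound _UpperCamelCase constants).
# For an object "UNet2DModel" guarded by the ["torch"] backend, the generated
# dummy module contains roughly:
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
#         @classmethod
#         def from_config(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])
#
#         @classmethod
#         def from_pretrained(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])
#
# so `import diffusers` succeeds without torch, and a clear "torch is required"
# error is raised only when the object is actually used.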
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : str UpperCAmelCase_ : str =None @staticmethod def UpperCAmelCase ( ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' raise NotImplementedError def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if not self.is_available(): raise RuntimeError( F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def UpperCAmelCase ( cls ) -> Tuple: '''simple docstring''' return F"""`pip install {cls.pip_package or cls.name}`""" class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[int] ="optuna" @staticmethod def UpperCAmelCase ( ) -> Union[str, Any]: '''simple docstring''' return is_optuna_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict: '''simple docstring''' return run_hp_search_optuna(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' return default_hp_space_optuna(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : List[str] ="ray" UpperCAmelCase_ : Dict ="'ray[tune]'" @staticmethod def UpperCAmelCase ( ) -> str: '''simple docstring''' return is_ray_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' return run_hp_search_ray(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' return default_hp_space_ray(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Tuple ="sigopt" @staticmethod def UpperCAmelCase ( ) -> int: '''simple docstring''' return is_sigopt_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return run_hp_search_sigopt(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Dict: '''simple docstring''' return default_hp_space_sigopt(UpperCAmelCase ) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : str ="wandb" @staticmethod def UpperCAmelCase ( ) -> Optional[Any]: '''simple docstring''' return is_wandb_available() def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return run_hp_search_wandb(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return default_hp_space_wandb(UpperCAmelCase ) _UpperCamelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, 
SigOptBackend, WandbBackend] } def lowerCAmelCase__( ) -> str: __snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowercase ) > 0: __snake_case : Dict = available_backends[0].name if len(lowercase ) > 1: logger.info( f"""{len(lowercase )} hyperparameter search backends available. Using {name} as the default.""" ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
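# Usage sketch for the registry above. The closing helper (unnamed in this
# dump; default_hp_search_backend in the original transformers module) returns
# the name of the first installed backend, which the Trainer then resolves
# through the same dict the helper iterates; e.g., assuming optuna is installed:
backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("optuna")]
assert backend_cls.name == "optuna"  # `.name` per the dict comprehension above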
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Any =["pixel_values"] def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**UpperCAmelCase ) __snake_case : List[Any] = size if size is not None else {"shortest_edge": 384} __snake_case : Dict = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) __snake_case : Tuple = do_resize __snake_case : Union[str, Any] = size # Default value set here for backwards compatibility where the value in config is None __snake_case : List[Any] = crop_pct if crop_pct is not None else 224 / 256 __snake_case : Optional[Any] = resample __snake_case : str = do_rescale __snake_case : List[str] = rescale_factor __snake_case : Any = do_normalize __snake_case : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __snake_case : List[str] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""" ) __snake_case : str = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case : int = int(shortest_edge / crop_pct ) __snake_case : int = get_resize_output_image_size(UpperCAmelCase , size=UpperCAmelCase , default_to_square=UpperCAmelCase ) __snake_case : Union[str, Any] = resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=UpperCAmelCase , **UpperCAmelCase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> Dict: '''simple docstring''' return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __snake_case : Dict = do_resize if do_resize is not None else self.do_resize __snake_case : Tuple = crop_pct if crop_pct is not None else self.crop_pct __snake_case : str = resample if resample is not None else self.resample __snake_case : List[str] = do_rescale if do_rescale is not None else self.do_rescale __snake_case : str = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Tuple = image_mean if image_mean is not None else self.image_mean __snake_case : Union[str, Any] = image_std if image_std is not None else self.image_std __snake_case : int = size if size is not None else self.size __snake_case : List[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) __snake_case : int = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
__snake_case : List[Any] = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: __snake_case : int = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , crop_pct=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_rescale: __snake_case : Optional[int] = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: __snake_case : Union[str, Any] = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] __snake_case : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] __snake_case : Tuple = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
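# Worked example of the resize rule above for the default crop_pct = 224/256
# and a config with size={"shortest_edge": 224}: since 224 < 384, the shortest
# edge is first scaled up to int(224 / (224 / 256)) = 256 and the result is
# center-cropped back to 224x224; at shortest_edge >= 384 the image is instead
# warped directly to a square with no crop.
shortest_edge, crop_pct = 224, 224 / 256
assert int(shortest_edge / crop_pct) == 256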
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
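# Usage and the recursion-depth bound used by sort() above: for n = 100
# elements, quicksort is allowed 2 * ceil(log2(100)) = 14 levels before the
# algorithm falls back to heap_sort; runs shorter than size_threshold = 16 are
# finished with insertion_sort.
assert 2 * math.ceil(math.log2(100)) == 14
assert sort([93.0, 3.0, 11.0, 0.0, -2.0, 7.0]) == [-2.0, 0.0, 3.0, 7.0, 11.0, 93.0]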
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
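# Worked example for distribute_coins above: in the tree
#
#       0
#      / \
#     3   0        (3 nodes, 3 coins)
#
# the left child must pass two coins up to the root and the root passes one
# down to the right child, for a total of 3 moves:
assert distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0))) == 3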
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def lowerCAmelCase__( lowercase : Dict ) -> str: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def lowerCAmelCase__( ) -> List[Any]: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" __snake_case : Any = [1, 2, 3] with pytest.raises(lowercase ): with parallel_backend("unsupported backend" ): map_nested(lowercase , lowercase , num_proc=2 ) with pytest.raises(lowercase ): with parallel_backend("unsupported backend" ): map_nested(lowercase , lowercase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def lowerCAmelCase__( lowercase : Dict ) -> Dict: __snake_case : Any = [1, 2] __snake_case : Dict = {"a": 1, "b": 2} __snake_case : Optional[int] = {"a": [1, 2], "b": [3, 4]} __snake_case : int = {"a": {"1": 1}, "b": 2} __snake_case : str = {"a": 1, "b": 2, "c": 3, "d": 4} __snake_case : Dict = [2, 3] __snake_case : Tuple = {"a": 2, "b": 3} __snake_case : int = {"a": [2, 3], "b": [4, 5]} __snake_case : Dict = {"a": {"1": 2}, "b": 3} __snake_case : str = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
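# The pattern the tests above exercise, as it would appear in application code
# (a sketch: joblibspark and an active Spark session are required for the
# "spark" backend, and the mapped function must be a picklable top-level def):
from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested

def add_one(i):
    return i + 1

with parallel_backend("spark"):
    assert map_nested(add_one, [1, 2, 3], num_proc=2) == [2, 3, 4]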
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCamelCase = 16 _UpperCamelCase = 32 def lowerCAmelCase__( lowercase : Accelerator , lowercase : DatasetDict , lowercase : List[int] , lowercase : List[int] , lowercase : int = 16 ) -> List[Any]: __snake_case : Optional[Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) __snake_case : List[Any] = DatasetDict( { "train": dataset["train"].select(lowercase ), "validation": dataset["train"].select(lowercase ), "test": dataset["validation"], } ) def tokenize_function(lowercase : int ): # max_length=None => use the model max length (it's actually the default) __snake_case : Optional[int] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __snake_case : Optional[Any] = datasets.map( lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __snake_case : Any = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowercase : List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __snake_case : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __snake_case : str = 16 elif accelerator.mixed_precision != "no": __snake_case : str = 8 else: __snake_case : int = None return tokenizer.pad( lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , ) # Instantiate dataloaders. 
__snake_case : int = DataLoader( tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) __snake_case : List[str] = DataLoader( tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) __snake_case : Tuple = DataLoader( tokenized_datasets["test"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader, test_dataloader def lowerCAmelCase__( lowercase : Dict , lowercase : int ) -> int: # New Code # __snake_case : Optional[Any] = [] # Download the dataset __snake_case : Optional[int] = load_dataset("glue" , "mrpc" ) # Create our splits __snake_case : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator __snake_case : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __snake_case : Dict = config["lr"] __snake_case : Optional[Any] = int(config["num_epochs"] ) __snake_case : Union[str, Any] = int(config["seed"] ) __snake_case : int = int(config["batch_size"] ) __snake_case : Optional[Any] = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation __snake_case : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __snake_case : str = batch_size // MAX_GPU_BATCH_SIZE __snake_case : List[str] = MAX_GPU_BATCH_SIZE set_seed(lowercase ) # New Code # # Create our folds: __snake_case : List[str] = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] ) __snake_case : Tuple = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowercase ): __snake_case , __snake_case , __snake_case : Union[str, Any] = get_fold_dataloaders( lowercase , lowercase , lowercase , lowercase , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __snake_case : List[str] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __snake_case : List[str] = model.to(accelerator.device ) # Instantiate optimizer __snake_case : List[Any] = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler __snake_case : Optional[Any] = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[Any] = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __snake_case : Dict = model(**lowercase ) __snake_case : Union[str, Any] = outputs.loss __snake_case : Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __snake_case : Optional[int] = model(**lowercase ) __snake_case : Optional[int] = outputs.logits.argmax(dim=-1 ) __snake_case , __snake_case : str = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowercase , references=lowercase , ) __snake_case : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase ) # New Code # # We also run predictions on the test set at the very end __snake_case : Any = [] for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __snake_case : int = model(**lowercase ) __snake_case : List[Any] = outputs.logits __snake_case , __snake_case : int = accelerator.gather_for_metrics((predictions, batch["labels"]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowercase , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: __snake_case : int = torch.cat(lowercase , dim=0 ) __snake_case : Optional[int] = torch.stack(lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) __snake_case : Tuple = metric.compute(predictions=lowercase , references=lowercase ) accelerator.print("Average test metrics from all folds:" , lowercase ) def lowerCAmelCase__( ) -> Union[str, Any]: __snake_case : Optional[Any] = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) # New Code # parser.add_argument("--num_folds" , type=lowercase , default=3 , help="The number of splits to perform across the dataset" ) __snake_case : Dict = parser.parse_args() __snake_case : Optional[Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
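# The fold-ensembling step at the end of training_function above, in isolation:
# per-fold test logits are stacked, summed, averaged over the number of folds,
# and argmax'd into final labels (shapes here are illustrative).
import torch

fold_logits = [torch.randn(8, 2) for _ in range(3)]  # 3 folds, 8 test examples
avg_logits = torch.stack(fold_logits, dim=0).sum(dim=0).div(3)
preds = avg_logits.argmax(dim=-1)
assert preds.shape == (8,)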
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
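# The Metropolis-style acceptance rule used above, worked through: a move that
# worsens the score by 5 is accepted with probability e^(change / temp), so it
# is likely while the system is hot and rare once it has cooled:
change, hot, cold = -5.0, 100.0, 2.0
assert round(math.e ** (change / hot), 3) == 0.951  # ~95% early on
assert round(math.e ** (change / cold), 3) == 0.082  # ~8% near the threshold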
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =["pixel_values"] def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 0.9 , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: '''simple docstring''' super().__init__(**UpperCAmelCase ) __snake_case : List[Any] = size if size is not None else {"shortest_edge": 224} __snake_case : Tuple = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) __snake_case : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224} __snake_case : Optional[Any] = get_size_dict(UpperCAmelCase , param_name="crop_size" ) __snake_case : Optional[Any] = do_resize __snake_case : List[str] = size __snake_case : str = crop_pct __snake_case : Any = resample __snake_case : Optional[Any] = do_center_crop __snake_case : Dict = crop_size __snake_case : Dict = do_rescale __snake_case : List[Any] = rescale_factor __snake_case : Optional[int] = do_normalize __snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __snake_case : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __snake_case : int = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""" ) if crop_pct is not None: if "shortest_edge" in size: __snake_case : List[Any] = int(size["shortest_edge"] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __snake_case : Dict = int(size["height"] / crop_pct ) else: __snake_case : Tuple = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct )) else: raise ValueError("Invalid size for resize: {}".format(UpperCAmelCase ) ) __snake_case : Tuple = get_resize_output_image_size(UpperCAmelCase , size=UpperCAmelCase , default_to_square=UpperCAmelCase ) else: if "shortest_edge" in size: __snake_case : List[str] = get_resize_output_image_size(UpperCAmelCase , size=size["shortest_edge"] , default_to_square=UpperCAmelCase ) elif "height" in size and "width" in size: __snake_case : Optional[int] = (size["height"], size["width"]) else: raise ValueError("Invalid size for resize: {}".format(UpperCAmelCase ) ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' __snake_case : int = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" ) return center_crop(UpperCAmelCase , size=(size["height"], size["width"]) , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> Tuple: '''simple docstring''' return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image: '''simple docstring''' __snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize __snake_case : Tuple = crop_pct if crop_pct is not None else self.crop_pct __snake_case : str = resample if resample is not None else self.resample __snake_case : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case : int = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : int = do_normalize if do_normalize is not None else self.do_normalize __snake_case : List[str] = image_mean if image_mean is not None else self.image_mean __snake_case : Optional[Any] = image_std if image_std is not None else self.image_std __snake_case : Dict = size if size is not None else self.size __snake_case : int = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) __snake_case : Any = crop_size if crop_size is not None else self.crop_size __snake_case : int = get_size_dict(UpperCAmelCase , param_name="crop_size" ) 
__snake_case : Union[str, Any] = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_pct is None: raise ValueError("Crop_pct must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. __snake_case : Dict = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: __snake_case : List[Any] = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , crop_pct=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_center_crop: __snake_case : Any = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images] if do_rescale: __snake_case : int = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_normalize: __snake_case : Tuple = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images] __snake_case : Any = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] __snake_case : List[str] = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
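# Worked example of the crop_pct branch in resize() above, with the defaults
# size={"shortest_edge": 224} and crop_pct=0.9: the shortest edge is first
# scaled to int(224 / 0.9) = 248, then center-cropped to the 224x224 crop_size.
assert int(224 / 0.9) == 248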
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"] UpperCAmelCase_ : Tuple ="FlavaImageProcessor" UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast") def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) __snake_case : List[Any] = kwargs.pop("feature_extractor" ) __snake_case : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = self.image_processor def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." 
) if text is not None: __snake_case : Union[str, Any] = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if images is not None: __snake_case : Union[str, Any] = self.image_processor( UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) if text is not None and images is not None: encoding.update(UpperCAmelCase ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[Any] = self.tokenizer.model_input_names __snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
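# Usage sketch (an assumption based on the public `transformers.FlavaProcessor`
# API that the processor above mirrors; downloading the checkpoint requires
# network access):
if __name__ == "__main__":
    from PIL import Image

    from transformers import FlavaProcessor

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    inputs = processor(
        text=["a photo of a cat"],
        images=Image.new("RGB", (224, 224)),
        return_tensors="pt",
        padding=True,
    )
    print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values', ...]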
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _UpperCamelCase = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', } } _UpperCamelCase = { '''camembert-base''': 512, } _UpperCamelCase = '''▁''' class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =VOCAB_FILES_NAMES UpperCAmelCase_ : str =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ : str =["input_ids", "attention_mask"] def __init__( self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase = None , **UpperCAmelCase , ) -> None: '''simple docstring''' __snake_case : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token __snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) __snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase ) ) __snake_case : Dict = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __snake_case : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} __snake_case : Optional[int] = len(self.fairseq_tokens_to_ids ) __snake_case : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __snake_case : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Dict = [self.cls_token_id] __snake_case : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1] def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return 
len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(UpperCAmelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = [] __snake_case : Union[str, Any] = "" __snake_case : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase ) + token __snake_case : List[Any] = True __snake_case : Union[str, Any] = [] else: current_sub_tokens.append(UpperCAmelCase ) __snake_case : int = False out_string += self.sp_model.decode(UpperCAmelCase ) return out_string.strip() def __getstate__( self ) -> List[Any]: '''simple docstring''' __snake_case : str = self.__dict__.copy() __snake_case : Optional[Any] = None return state def __setstate__( self , UpperCAmelCase ) -> str: '''simple docstring''' __snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __snake_case : List[str] = {} __snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __snake_case : Optional[Any] = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , "wb" ) as fi: __snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,)
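# Usage sketch (assumption: the tokenizer above is `transformers.CamembertTokenizer`;
# the checkpoint id below is the one referenced in the vocab map above):
if __name__ == "__main__":
    from transformers import CamembertTokenizer

    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    encoded = tokenizer("J'aime le camembert !")
    print(encoded["input_ids"])  # ids are wrapped in <s> ... </s> special tokens
    print(tokenizer.decode(encoded["input_ids"]))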
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 types of dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
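# Usage sketch: instantiating the config with its defaults and reading back the
# derived `num_hidden_layers` property defined above (encoder + decoder layers).
# Shown as a guarded example; the module's relative imports mean it is normally
# used via the transformers package rather than run directly.
if __name__ == "__main__":
    config = XLMProphetNetConfig()
    print(config.num_hidden_layers)  # 24 = 12 encoder + 12 decoder layers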
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # For every prefix of arr, a sum of zero can be formed by taking no
    # elements, hence True.
    for i in range(arr_len + 1):
        subset[i][0] = True

    # A non-zero sum cannot be formed from the empty prefix, hence False.
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
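# Worked example (added illustration): {4, 5} from [3, 34, 4, 12, 5, 2] sums to
# 9, while no subset reaches 30, so the calls below print True and False.
if __name__ == "__main__":
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False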
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
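# Usage sketch (an assumption: the accelerate state must be initialised first,
# e.g. by constructing `Accelerator()`, before the adapter above will log; and
# because of the relative import this module is used via the accelerate package
# rather than executed directly):
#
#     from accelerate import Accelerator
#     from accelerate.logging import get_logger
#
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed once, on the main process only")
#     logger.info("printed by every process, in rank order",
#                 main_process_only=False, in_order=True)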
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
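# Invocation sketch (assumption: RANK and WORLD_SIZE come from the launcher, so
# this test is meant to be started with torchrun rather than run directly; the
# script name below is illustrative):
#
#   torchrun --nproc_per_node=2 run_torch_distributed.py --streaming False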
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
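# Worked check (added illustration): for g(x) = x**2, the forward-mode AD above
# gives g'(3) = 6 and a constant second derivative of 2.
if __name__ == "__main__":
    assert differentiate(lambda x: x**2, 3, 1) == 6
    assert differentiate(lambda x: x**2, 3, 2) == 2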
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        # n must be a multiple of first_term (= a), since n = a * (4d - a)
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # (a + n/a) must be divisible by 4 for d to be an integer
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
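# Sanity sketch (an added cross-check using the same derivation n = a*(4d - a)
# as the sieve above; the Project Euler 135 statement gives n = 27 exactly two
# solutions and n = 1155 exactly ten):
def _count_solutions(n: int) -> int:
    count = 0
    for a in range(1, n + 1):  # a is the middle term y; requires a | n
        if n % a == 0 and (a + n // a) % 4 == 0:
            d = (a + n // a) // 4
            if 0 < d < a:  # x = a + d > y and z = a - d > 0
                count += 1
    return count


if __name__ == "__main__":
    assert _count_solutions(27) == 2
    assert _count_solutions(1155) == 10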
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
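# Invocation sketch (assumption: the `{small,medium,large}_ft.pkl` checkpoints
# sit inside --dialogpt_path; the script name below is illustrative):
#
#   python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_checkpoints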
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []

    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value: the empty string has exactly one (empty) combination
    table[0] = [[]]

    # iterate through the indices
    for i in range(table_size):
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # add the word to every combination the current position holds
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # then push those combinations to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are built in reverse order, so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
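# Worked example (added illustration): "purple" can be segmented two ways with
# this bank, so the call below prints [['purp', 'le'], ['p', 'ur', 'p', 'le']].
if __name__ == "__main__":
    print(all_construct("purple", ["purp", "p", "ur", "le", "purpl"]))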
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder _UpperCamelCase = '''base_with_context''' def lowerCAmelCase__( lowercase : List[str] , lowercase : str ) -> Tuple: __snake_case : Any = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) __snake_case : Optional[int] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=lowercase ) for lyr_num, lyr in enumerate(model.encoders ): __snake_case : List[str] = weights[f"""layers_{lyr_num}"""] __snake_case : int = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) __snake_case : str = ly_weight["attention"] __snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) __snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) __snake_case : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) __snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) __snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) __snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) __snake_case : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) __snake_case : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) __snake_case : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : List[Any] ) -> Dict: __snake_case : List[Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) __snake_case : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=lowercase ) for lyr_num, lyr in enumerate(model.encoders ): __snake_case : Tuple = weights[f"""layers_{lyr_num}"""] __snake_case : List[str] = ly_weight["attention"] __snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) __snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) __snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) __snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) __snake_case : Dict = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) __snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) __snake_case : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) __snake_case : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) __snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) __snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def lowerCAmelCase__( lowercase : str , lowercase : Optional[int] ) -> Dict: __snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) __snake_case : Optional[int] = 
nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) __snake_case : Dict = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=lowercase ) __snake_case : Tuple = nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) for lyr_num, lyr in enumerate(model.decoders ): __snake_case : Optional[Any] = weights[f"""layers_{lyr_num}"""] __snake_case : Any = nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) __snake_case : Dict = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) __snake_case : Optional[int] = ly_weight["self_attention"] __snake_case : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) __snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) __snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) __snake_case : Any = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) __snake_case : Tuple = ly_weight["MultiHeadDotProductAttention_0"] __snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) __snake_case : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) __snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) __snake_case : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) __snake_case : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) __snake_case : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) __snake_case : int = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) __snake_case : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) __snake_case : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) __snake_case : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) __snake_case : Dict = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) __snake_case : Any = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]: __snake_case : List[Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) __snake_case : List[Any] = jnp.tree_util.tree_map(onp.array , lowercase ) __snake_case : Tuple = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] __snake_case : Union[str, Any] = os.path.join(args.checkpoint_path , ".." 
, "config.gin" ) __snake_case : Dict = inference.parse_training_gin_file(lowercase , lowercase ) __snake_case : Union[str, Any] = inference.InferenceModel(args.checkpoint_path , lowercase ) __snake_case : List[str] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) __snake_case : List[Any] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) __snake_case : Any = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) __snake_case : Tuple = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) __snake_case : Optional[Any] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , lowercase ) __snake_case : Tuple = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , lowercase ) __snake_case : int = load_decoder(ta_checkpoint["target"]["decoder"] , lowercase ) __snake_case : List[Any] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) __snake_case : Tuple = SpectrogramDiffusionPipeline( notes_encoder=lowercase , continuous_encoder=lowercase , decoder=lowercase , scheduler=lowercase , melgan=lowercase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help='''Path to the original jax model checkpoint.''', ) _UpperCamelCase = parser.parse_args() main(args)
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=56 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = parent __snake_case : Tuple = batch_size __snake_case : List[str] = seq_length __snake_case : Optional[int] = is_training __snake_case : int = use_attention_mask __snake_case : Union[str, Any] = use_token_type_ids __snake_case : Any = use_labels __snake_case : List[str] = vocab_size __snake_case : int = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : Optional[int] = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : List[Any] = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : Dict = initializer_range __snake_case : List[Any] = num_choices __snake_case : Union[str, Any] = rescale_embeddings __snake_case : List[Any] = attention_type __snake_case : str = use_bias __snake_case : Dict = block_size __snake_case : Optional[Any] = num_random_blocks def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_attention_mask: __snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Union[str, Any] = None if self.use_token_type_ids: __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase ( self ) -> Any: '''simple 
docstring''' __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Dict = config_and_inputs __snake_case : int = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class _lowerCamelCase ( a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) UpperCAmelCase_ : Dict =False UpperCAmelCase_ : str =False def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Dict = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Any: '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_hidden_states_output() @slow def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : Any = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase ( self ) -> int: '''simple docstring''' __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __snake_case : Optional[Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) __snake_case : Tuple = model_class(UpperCAmelCase ) @jax.jit def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ): return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase ) with self.subTest("JIT Enabled" ): __snake_case : int = model_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __snake_case : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> int: '''simple docstring''' if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> str: '''simple docstring''' __snake_case : Union[str, Any] = parent __snake_case : int = 13 __snake_case : Tuple = 7 __snake_case : Tuple = True __snake_case : int = True __snake_case : List[Any] = True __snake_case : Optional[Any] = True __snake_case : Union[str, Any] = 99 __snake_case : Union[str, Any] = 32 __snake_case : List[Any] = 2 __snake_case : Optional[Any] = 4 __snake_case : Any = 37 __snake_case : Dict = "gelu" __snake_case : Optional[int] = 0.1 __snake_case : str = 0.1 __snake_case : Dict = 512 __snake_case : Optional[Any] = 16 __snake_case : Optional[int] = 2 __snake_case : str = 0.02 __snake_case : Tuple = 3 __snake_case : List[Any] = 4 __snake_case : str = None def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = None if self.use_input_mask: __snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : List[str] = None if self.use_token_type_ids: __snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[Any] = None __snake_case : Optional[int] = None __snake_case : List[str] = None if self.use_labels: __snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : int = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase , UpperCAmelCase ) -> List[str]: '''simple docstring''' __snake_case : Union[str, Any] = TFRoFormerModel(config=UpperCAmelCase ) __snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __snake_case : List[str] = [input_ids, input_mask] __snake_case : Tuple = model(UpperCAmelCase ) __snake_case : List[str] = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : Dict = True __snake_case : Dict = TFRoFormerForCausalLM(config=UpperCAmelCase ) __snake_case : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __snake_case : int = model(UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase ) __snake_case : List[str] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __snake_case : Union[str, Any] = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = self.num_labels __snake_case : Optional[int] = TFRoFormerForSequenceClassification(config=UpperCAmelCase ) __snake_case : Tuple = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __snake_case : List[str] = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: '''simple docstring''' __snake_case : Union[str, Any] = self.num_choices __snake_case : Optional[Any] = TFRoFormerForMultipleChoice(config=UpperCAmelCase ) __snake_case : Dict = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : List[Any] = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __snake_case : Optional[int] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __snake_case : Union[str, Any] = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : int = self.num_labels __snake_case : Optional[int] = TFRoFormerForTokenClassification(config=UpperCAmelCase ) __snake_case : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __snake_case : List[Any] = 
model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: '''simple docstring''' __snake_case : str = TFRoFormerForQuestionAnswering(config=UpperCAmelCase ) __snake_case : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __snake_case : Union[str, Any] = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Any = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Optional[int] = config_and_inputs __snake_case : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _lowerCamelCase ( a , a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Any =( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase_ : List[Any] =( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase_ : List[str] =False UpperCAmelCase_ : List[Any] =False def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Union[str, Any] = TFRoFormerModelTester(self ) __snake_case : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase ) def 
UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase ) def UpperCAmelCase ( self ) -> Any: '''simple docstring''' __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase ) @slow def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Any = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) self.assertIsNotNone(UpperCAmelCase ) @require_tf class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Tuple = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) __snake_case : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __snake_case : str = model(UpperCAmelCase )[0] # TODO Replace vocab size __snake_case : str = 50000 __snake_case : Union[str, Any] = [1, 6, vocab_size] self.assertEqual(output.shape , UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __snake_case : Dict = tf.constant( [ [ [-0.12_053_341, -1.0_264_901, 0.29_221_946], [-1.5_133_783, 0.197_433, 0.15_190_607], [-5.0_135_403, -3.900_256, -0.84_038_764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 ) @require_tf class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Any =1e-4 def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = tf.constant([[4, 10]] ) __snake_case : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __snake_case : Optional[int] = emba(input_ids.shape ) __snake_case : str = tf.constant( [[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] ) tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance ) def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[Any] = tf.constant( [ [0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000], [0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617], [0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870], ] ) __snake_case : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) __snake_case : Optional[int] = emba.weight[:3, :5] tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance ) @require_tf class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : str =1e-4 def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[str] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __snake_case : Optional[Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __snake_case : Optional[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) __snake_case : int = embed_positions([2, 16, 768] )[None, None, :, :] __snake_case , __snake_case : Dict = TFRoFormerSelfAttention.apply_rotary_position_embeddings( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) __snake_case : Dict = tf.constant( [ [0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700], [-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343], [-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985], 
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871], [0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980], [3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253], ] ) __snake_case : Optional[Any] = tf.constant( [ [0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700], [0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343], [1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985], [2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871], [-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980], [-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
326
import argparse import datetime def lowerCAmelCase__( lowercase : str ) -> str: __snake_case : int = { "0": "Sunday", "1": "Monday", "2": "Tuesday", "3": "Wednesday", "4": "Thursday", "5": "Friday", "6": "Saturday", } __snake_case : int = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase ) < 11: raise ValueError("Must be 10 characters long" ) # Get month __snake_case : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError("Month must be between 1 - 12" ) __snake_case : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError("Date separator must be '-' or '/'" ) # Get day __snake_case : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError("Date must be between 1 - 31" ) # Get second separator __snake_case : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError("Date separator must be '-' or '/'" ) # Get year __snake_case : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( "Year out of range. There has to be some sort of limit...right?" ) # Get datetime obj for validation __snake_case : str = datetime.date(int(lowercase ) , int(lowercase ) , int(lowercase ) ) # Start math if m <= 2: __snake_case : Optional[Any] = y - 1 __snake_case : Tuple = m + 12 # maths var __snake_case : int = int(str(lowercase )[:2] ) __snake_case : int = int(str(lowercase )[2:] ) __snake_case : int = int(2.6 * m - 5.3_9 ) __snake_case : int = int(c / 4 ) __snake_case : int = int(k / 4 ) __snake_case : int = int(d + k ) __snake_case : int = int(t + u + v + x ) __snake_case : int = int(z - (2 * c) ) __snake_case : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError("The date was evaluated incorrectly. Contact developer." ) # Response __snake_case : str = f"""Your date {date_input}, is a {days[str(lowercase )]}!""" return response if __name__ == "__main__": import doctest doctest.testmod() _UpperCamelCase = argparse.ArgumentParser( description=( '''Find out what day of the week nearly any date is or was. Enter ''' '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format''' ) ) parser.add_argument( '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)''' ) _UpperCamelCase = parser.parse_args() zeller(args.date_input)
326
1
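The rotary checks above multiply queries and keys by position-dependent sines and cosines. A minimal NumPy sketch of that rotation, assuming the usual pairwise layout (function and variable names here are illustrative, not RoFormer's exact interleaving):

import numpy as np

def rotary_rotate(x, theta):
    # x: (seq_len, dim) with dim even; theta: (seq_len, dim // 2) angles.
    # Each consecutive pair (x1, x2) is rotated by its position-dependent angle:
    # (x1 * cos - x2 * sin, x1 * sin + x2 * cos)
    x1, x2 = x[..., 0::2], x[..., 1::2]
    cos, sin = np.cos(theta), np.sin(theta)
    out = np.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin
    out[..., 1::2] = x1 * sin + x2 * cos
    return out

seq_len, dim = 16, 64  # same sizes the test above uses
positions = np.arange(seq_len)[:, None]                   # (seq_len, 1)
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))  # (dim // 2,)
theta = positions * inv_freq                              # (seq_len, dim // 2)
q = np.random.randn(seq_len, dim)
k = np.random.randn(seq_len, dim)
# Rotating q and k by the same angles preserves their norms, and the dot
# product q_rot . k_rot depends only on the relative distance of positions.
q_rot, k_rot = rotary_rotate(q, theta), rotary_rotate(k, theta)
assert np.allclose(np.linalg.norm(q_rot, axis=-1), np.linalg.norm(q, axis=-1))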
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[Any] =["image_processor", "tokenizer"] UpperCAmelCase_ : int ="CLIPImageProcessor" UpperCAmelCase_ : Tuple =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) __snake_case : int = kwargs.pop("feature_extractor" ) __snake_case : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: __snake_case : Any = self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if images is not None: __snake_case : Any = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None and images is not None: __snake_case : Dict = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Tuple = self.tokenizer.model_input_names __snake_case : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
326
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : str , lowercase : List[Any] , lowercase : List[str] ) -> int: if index == r: for j in range(lowercase ): print(data[j] , end=" " ) print(" " ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location __snake_case : Union[str, Any] = arr[i] combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def lowerCAmelCase__( lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] ) -> Optional[Any]: # A temporary array to store all combination one by one __snake_case : Tuple = [0] * r # Print all combination using temporary array 'data[]' combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 ) if __name__ == "__main__": # Driver code to check the function above _UpperCamelCase = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
326
1
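The recursive include/exclude enumeration of r-subsets above visits combinations in lexicographic index order, which should match the order the standard library produces; a quick sketch to sanity-check that, assuming distinct array values:

from itertools import combinations

arr = [10, 20, 30, 40, 50]
r = 3
# itertools.combinations yields r-length subsequences in lexicographic order
# of indices, mirroring the "include current, then exclude" recursion above.
for combo in combinations(arr, r):
    print(*combo)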
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : Optional[Union[str, Path]] =None UpperCAmelCase_ : bool =False UpperCAmelCase_ : bool =False UpperCAmelCase_ : bool =False UpperCAmelCase_ : Optional[Dict] =None UpperCAmelCase_ : Optional[str] =None UpperCAmelCase_ : bool =False UpperCAmelCase_ : bool =False UpperCAmelCase_ : bool =False UpperCAmelCase_ : bool =True UpperCAmelCase_ : Optional[int] =None UpperCAmelCase_ : int =1 UpperCAmelCase_ : Optional[Union[str, bool]] =None UpperCAmelCase_ : bool =False UpperCAmelCase_ : Optional[Dict] =None UpperCAmelCase_ : Optional[str] =None def UpperCAmelCase ( self ) -> "DownloadConfig": '''simple docstring''' return self.__class__(**{k: copy.deepcopy(UpperCAmelCase ) for k, v in self.__dict__.items()} )
326
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] _UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def lowerCAmelCase__( lowercase : str ) -> Optional[Any]: __snake_case : Optional[int] = torch.load(lowercase , map_location="cpu" ) return sd def lowerCAmelCase__( lowercase : List[Any] , lowercase : List[Any] , lowercase : List[Any]=rename_keys_prefix ) -> Dict: __snake_case : Tuple = OrderedDict() __snake_case : str = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __snake_case : Optional[Any] = key for name_pair in rename_keys_prefix: __snake_case : List[str] = new_key.replace(name_pair[0] , name_pair[1] ) __snake_case : List[str] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __snake_case : List[Any] = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : Any ) -> List[Any]: assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: __snake_case : Any = "pretraining" if "vcr" in checkpoint_path: __snake_case : Optional[Any] = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: __snake_case : Tuple = {"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: __snake_case : Dict = {"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: __snake_case : Any = {"visual_embedding_dim": 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: __snake_case : Dict = {"visual_embedding_dim": 512} __snake_case : Any = "multichoice" elif "vqa_advanced" in checkpoint_path: __snake_case : List[Any] = {"visual_embedding_dim": 2048} __snake_case : Optional[Any] = "vqa_advanced" elif "vqa" in checkpoint_path: __snake_case : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129} __snake_case : Union[str, Any] = "vqa" elif "nlvr" in checkpoint_path: __snake_case : Tuple = { "visual_embedding_dim": 1024, "num_labels": 2, } __snake_case : List[Any] = "nlvr" __snake_case : Union[str, Any] = VisualBertConfig(**lowercase ) # Load State Dict __snake_case : Any = load_state_dict(lowercase ) __snake_case : Dict = get_new_dict(lowercase , lowercase ) if model_type == "pretraining": __snake_case : Optional[Any] = VisualBertForPreTraining(lowercase ) elif model_type == "vqa": __snake_case : Tuple = VisualBertForQuestionAnswering(lowercase ) elif model_type == "nlvr": 
__snake_case : Tuple = VisualBertForVisualReasoning(lowercase ) elif model_type == "multichoice": __snake_case : List[Any] = VisualBertForMultipleChoice(lowercase ) model.load_state_dict(lowercase ) # Save Checkpoints Path(lowercase ).mkdir(exist_ok=lowercase ) model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') _UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
326
1
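The checkpoint converter above rewrites state-dict keys by applying ordered (old, new) prefix pairs; the pattern in isolation, with made-up pairs and keys for illustration:

from collections import OrderedDict

rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]  # illustrative
old_sd = OrderedDict([("bert.bert.encoder.layer.0.weight", 1), ("bert.cls.bias", 2)])

new_sd = OrderedDict()
for key, value in old_sd.items():
    new_key = key
    for old, new in rename_pairs:
        new_key = new_key.replace(old, new)  # every pair is applied, in order
    new_sd[new_key] = value

print(list(new_sd))  # ['visual_bert.encoder.layer.0.weight', 'cls.bias']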
import os # Precomputes a list of the 100 first triangular numbers _UpperCamelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def lowerCAmelCase__( ) -> Dict: __snake_case : Tuple = os.path.dirname(os.path.realpath(lowercase ) ) __snake_case : Union[str, Any] = os.path.join(lowercase , "words.txt" ) __snake_case : int = "" with open(lowercase ) as f: __snake_case : Optional[int] = f.readline() __snake_case : Optional[int] = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )] __snake_case : Optional[Any] = [ word for word in [sum(ord(lowercase ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(lowercase ) if __name__ == "__main__": print(solution())
326
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCAmelCase__( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] ) -> Tuple: # Load configuration defined in the metadata file with open(lowercase ) as metadata_file: __snake_case : int = json.load(lowercase ) __snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] ) # Load in the weights from the checkpoint_path __snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"] # Load the entity vocab file __snake_case : Tuple = load_original_entity_vocab(lowercase ) # add an entry for [MASK2] __snake_case : Optional[int] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 __snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks __snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase ) __snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(lowercase ) with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f: __snake_case : Tuple = json.load(lowercase ) __snake_case : List[Any] = "MLukeTokenizer" with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(lowercase , lowercase ) __snake_case : Any = MLukeTokenizer.from_pretrained(lowercase ) # Initialize the embeddings of the special tokens __snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0] __snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0] __snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"] __snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: __snake_case : List[Any] = state_dict[bias_name] __snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 ) __snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 ) __snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self.""" __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] __snake_case : str = state_dict[prefix + matrix_name] __snake_case : Union[str, Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"] __snake_case : List[str] = 
entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Any = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' __snake_case : List[Any] = state_dict["entity_predictions.bias"] __snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) __snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] ) __snake_case : Any = LukeForMaskedLM(config=lowercase ).eval() state_dict.pop("entity_predictions.decoder.weight" ) state_dict.pop("lm_head.decoder.weight" ) state_dict.pop("lm_head.decoder.bias" ) __snake_case : int = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )): __snake_case : str = state_dict[key] else: __snake_case : str = state_dict[key] __snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase ) if set(lowercase ) != {"luke.embeddings.position_ids"}: raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(lowercase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs __snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" ) __snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." __snake_case : Union[str, Any] = (0, 9) __snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : Any = model(**lowercase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : Optional[Any] = torch.Size((1, 33, 768) ) __snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base __snake_case : str = torch.Size((1, 1, 768) ) __snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction __snake_case : str = MLukeTokenizer.from_pretrained(lowercase ) __snake_case : Dict = "Tokyo is the capital of <mask>." 
__snake_case : Union[str, Any] = (24, 30) __snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" ) __snake_case : int = model(**lowercase ) __snake_case : Dict = encoding["input_ids"][0].tolist() __snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) ) __snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(lowercase ) __snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item() __snake_case : Optional[int] = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowercase ) ) model.save_pretrained(lowercase ) def lowerCAmelCase__( lowercase : Optional[int] ) -> List[Any]: __snake_case : Any = ["[MASK]", "[PAD]", "[UNK]"] __snake_case : Any = [json.loads(lowercase ) for line in open(lowercase )] __snake_case : Any = {} for entry in data: __snake_case : Any = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: __snake_case : Optional[int] = entity_id break __snake_case : Union[str, Any] = f"""{language}:{entity_name}""" __snake_case : Any = entity_id return new_mapping if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) _UpperCamelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
326
1
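The triangular-word solution above relies on a precomputed table of the first 100 triangular numbers; membership can also be tested directly by inverting t = n(n+1)/2, as this sketch does:

import math

def is_triangular(t: int) -> bool:
    # t = n(n+1)/2 has an integer solution n = (-1 + sqrt(1 + 8t)) / 2
    # exactly when 1 + 8t is a perfect square.
    root = math.isqrt(1 + 8 * t)
    return root * root == 1 + 8 * t

word_value = sum(ord(c) - 64 for c in "SKY")  # A=1 ... Z=26
print(word_value, is_triangular(word_value))  # 55 True (55 is the 10th triangular number)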
import itertools import math def lowerCAmelCase__( lowercase : int ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase__( ) -> Optional[Any]: __snake_case : List[Any] = 2 while True: if is_prime(lowercase ): yield num num += 1 def lowerCAmelCase__( lowercase : int = 1_0001 ) -> int: return next(itertools.islice(prime_generator() , nth - 1 , lowercase ) ) if __name__ == "__main__": print(F'''{solution() = }''')
326
from maths.prime_factors import prime_factors


def lowerCAmelCase__( lowercase : int ) -> int:
    # Liouville-style parity: -1 when the number has an odd count of prime
    # factors (counted with multiplicity), 1 otherwise.
    if not isinstance(lowercase , lowercase ):
        __snake_case : Optional[int] = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(lowercase )
    if number < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(lowercase ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
326
1
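The parity function above is the Liouville function, (-1) ** Omega(n), when prime factors are counted with multiplicity (which the imported `prime_factors` is assumed to do); a self-contained trial-division cross-check:

def liouville(n: int) -> int:
    # lambda(n) = (-1) ** Omega(n), where Omega counts prime factors with
    # multiplicity; trial division is enough for a sanity check.
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    if n > 1:
        count += 1
    return -1 if count % 2 else 1

print([liouville(n) for n in range(1, 11)])  # [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]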
import re

from filelock import FileLock


try:
    import nltk

    _UpperCamelCase = True
except (ImportError, ModuleNotFoundError):
    _UpperCamelCase = False

if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)


def lowerCAmelCase__( lowercase : str ) -> str:
    # strip the pegasus newline char before sentence-splitting
    lowercase = re.sub("<n>" , "" , lowercase )
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowercase ) )
326
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" ) __snake_case : str = AutoTokenizer.from_pretrained("google/mt5-small" ) __snake_case : List[Any] = tokenizer("Hello there" , return_tensors="np" ).input_ids __snake_case : int = tokenizer("Hi I am" , return_tensors="np" ).input_ids __snake_case : Tuple = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id ) __snake_case : Tuple = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits __snake_case : str = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean() __snake_case : Any = -(labels.shape[-1] * loss.item()) __snake_case : List[str] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
326
1
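The Flax MT5 test above turns logits into a mean softmax cross-entropy before rescaling it into a score; the core of that computation, sketched in plain NumPy with made-up shapes:

import numpy as np

def token_nll(logits, labels):
    # log-softmax over the vocab axis, then pick out the gold-label columns;
    # the mean over positions matches optax.softmax_cross_entropy(...).mean().
    logits = logits - logits.max(axis=-1, keepdims=True)  # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
    gold = np.take_along_axis(log_probs, labels[..., None], axis=-1)[..., 0]
    return -gold.mean()

rng = np.random.default_rng(0)
logits = rng.normal(size=(1, 4, 10))      # (batch, seq_len, vocab)
labels = rng.integers(0, 10, size=(1, 4))
print(token_nll(logits, labels))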
import numpy as np


def lowerCAmelCase__( lowercase : np.ndarray ) -> np.ndarray:
    # Logistic sigmoid: squashes any real input into (0, 1).
    return 1 / (1 + np.exp(-vector ))


def lowerCAmelCase__( lowercase : np.ndarray ) -> np.ndarray:
    # SiLU / swish activation: x * sigmoid(x).
    return vector * sigmoid(lowercase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
326
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=768 ) -> List[str]: '''simple docstring''' super().__init__(UpperCAmelCase ) __snake_case : Optional[int] = proj_size __snake_case : str = CLIPVisionModel(UpperCAmelCase ) __snake_case : Tuple = PaintByExampleMapper(UpperCAmelCase ) __snake_case : Union[str, Any] = nn.LayerNorm(config.hidden_size ) __snake_case : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling __snake_case : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]: '''simple docstring''' __snake_case : int = self.model(pixel_values=UpperCAmelCase ) __snake_case : Optional[int] = clip_output.pooler_output __snake_case : Any = self.mapper(latent_states[:, None] ) __snake_case : Any = self.final_layer_norm(UpperCAmelCase ) __snake_case : str = self.proj_out(UpperCAmelCase ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class _lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' super().__init__() __snake_case : List[Any] = (config.num_hidden_layers + 1) // 5 __snake_case : Dict = config.hidden_size __snake_case : str = 1 __snake_case : List[Any] = nn.ModuleList( [ BasicTransformerBlock(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , activation_fn="gelu" , attention_bias=UpperCAmelCase ) for _ in range(UpperCAmelCase ) ] ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' for block in self.blocks: __snake_case : int = block(UpperCAmelCase ) return hidden_states
326
1
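The plain sigmoid above can trip NumPy overflow warnings for large-magnitude inputs; a numerically stable variant under the same interface, as a sketch:

import numpy as np

def stable_sigmoid(x: np.ndarray) -> np.ndarray:
    # Evaluate each branch only where its exponent cannot overflow:
    # exp(-x) for x >= 0, and exp(x) / (1 + exp(x)) for x < 0.
    out = np.empty_like(x, dtype=float)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    exp_x = np.exp(x[~pos])
    out[~pos] = exp_x / (1.0 + exp_x)
    return out

def swish(x: np.ndarray) -> np.ndarray:
    # SiLU / swish: x * sigmoid(x)
    return x * stable_sigmoid(x)

x = np.array([-1000.0, -1.0, 0.0, 1.0, 1000.0])
print(stable_sigmoid(x))  # no overflow warning, values in [0, 1]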
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Optional[Any] ="luke" def __init__( self , UpperCAmelCase=50267 , UpperCAmelCase=500000 , UpperCAmelCase=768 , UpperCAmelCase=256 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-12 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , **UpperCAmelCase , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) __snake_case : List[str] = vocab_size __snake_case : int = entity_vocab_size __snake_case : Dict = hidden_size __snake_case : int = entity_emb_size __snake_case : Tuple = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : List[str] = hidden_act __snake_case : Union[str, Any] = intermediate_size __snake_case : int = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Union[str, Any] = max_position_embeddings __snake_case : Tuple = type_vocab_size __snake_case : Union[str, Any] = initializer_range __snake_case : int = layer_norm_eps __snake_case : str = use_entity_aware_attention __snake_case : Dict = classifier_dropout
326
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
326
1
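The UnCLIP `__init__` above guards heavy imports behind an availability check and falls back to dummy objects; the shape of that pattern reduced to plain Python (`torch` here is just an example dependency):

# A stripped-down optional-dependency guard: try the real import, and fall
# back to a stub that fails loudly only when actually used.
try:
    import torch  # the optional heavy dependency
    _torch_available = True
except ImportError:
    _torch_available = False

if _torch_available:
    def make_tensor(data):
        return torch.tensor(data)
else:
    def make_tensor(data):
        raise ImportError("this feature requires `torch`; pip install torch")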
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
326
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = torch.device('''cpu''') def lowerCAmelCase__( ) -> Any: __snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def lowerCAmelCase__( lowercase : Dict ) -> List[Any]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]: __snake_case : List[Any] = dct.pop(lowercase ) __snake_case : List[Any] = val def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple: __snake_case : Optional[Any] = [] for k in state_dict.keys(): __snake_case : Union[str, Any] = k if ".pwconv" in k: __snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" ) if ".dwconv" in k: __snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" ) if ".Proj." in k: __snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." ) if "patch_embed" in k_new: __snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: __snake_case : int = k_new.split("." ) if ls[2].isdigit(): __snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: __snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]: __snake_case : List[str] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __snake_case : Tuple = 1000 __snake_case : Any = "huggingface/label-files" __snake_case : int = "imagenet-1k-id2label.json" __snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) ) __snake_case : str = {int(lowercase ): v for k, v in idalabel.items()} __snake_case : int = idalabel __snake_case : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __snake_case : Optional[Any] = [3, 3, 6, 4] __snake_case : Optional[int] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __snake_case : List[str] = [3, 3, 9, 6] __snake_case : Optional[Any] = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __snake_case : Optional[int] = [4, 3, 10, 5] __snake_case : Dict = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __snake_case : str = [4, 4, 12, 6] __snake_case : Optional[Any] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): __snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase ) else: __snake_case : Tuple = torch.load(lowercase , map_location="cpu" ) __snake_case : Optional[int] = checkpoint __snake_case : Any = create_rename_keys(lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase , lowercase , lowercase ) # load HuggingFace model __snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval() hf_model.load_state_dict(lowercase ) # prepare test inputs __snake_case : Optional[Any] = prepare_img() __snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" ) __snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" ) # compare outputs from both models __snake_case : str = get_expected_output(lowercase ) __snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(lowercase ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _UpperCamelCase = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
326
1
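The MMBT `__init__` above defers submodule imports through `_LazyModule`; module-level `__getattr__` (PEP 562) achieves the same effect in a few lines, sketched here rather than reproducing the real helper:

# pkg/__init__.py -- a hand-rolled lazy loader in the spirit of _LazyModule.
import importlib

_LAZY = {"MMBTConfig": ".configuration_mmbt"}  # attribute -> submodule

def __getattr__(name):
    # Only runs when `name` is not already defined; imports on first access.
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")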
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _UpperCamelCase = '''pt''' elif is_tf_available(): _UpperCamelCase = '''tf''' else: _UpperCamelCase = '''jax''' class _lowerCamelCase ( a , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] =PerceiverTokenizer UpperCAmelCase_ : Union[str, Any] =False def UpperCAmelCase ( self ) -> Any: '''simple docstring''' super().setUp() __snake_case : Any = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase ( self ) -> int: '''simple docstring''' return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" ) def UpperCAmelCase ( self , **UpperCAmelCase ) -> PerceiverTokenizer: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=20 , UpperCAmelCase=5 ) -> Tuple[str, list]: '''simple docstring''' __snake_case : int = [] for i in range(len(UpperCAmelCase ) ): try: __snake_case : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) __snake_case : Union[str, Any] = list(filter(lambda UpperCAmelCase : re.match(r"^[ a-zA-Z]+$" , t[1] ) , UpperCAmelCase ) ) __snake_case : Tuple = list(filter(lambda UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase ) , UpperCAmelCase ) ) if max_length is not None and len(UpperCAmelCase ) > max_length: __snake_case : Tuple = toks[:max_length] if min_length is not None and len(UpperCAmelCase ) < min_length and len(UpperCAmelCase ) > 0: while len(UpperCAmelCase ) < min_length: __snake_case : int = toks + toks # toks_str = [t[1] for t in toks] __snake_case : int = [t[0] for t in toks] # Ensure consistency __snake_case : Optional[int] = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) if " " not in output_txt and len(UpperCAmelCase ) > 1: __snake_case : List[str] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase ) ) if with_prefix_space: __snake_case : Optional[int] = " " + output_txt __snake_case : int = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) return output_txt, output_ids def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = self.perceiver_tokenizer __snake_case : List[str] = "Unicode €." 
__snake_case : str = tokenizer(UpperCAmelCase ) __snake_case : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["input_ids"] , UpperCAmelCase ) # decoding __snake_case : Any = tokenizer.decode(UpperCAmelCase ) self.assertEqual(UpperCAmelCase , "[CLS]Unicode €.[SEP]" ) __snake_case : Tuple = tokenizer("e è é ê ë" ) __snake_case : Any = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["input_ids"] , UpperCAmelCase ) # decoding __snake_case : List[Any] = tokenizer.decode(UpperCAmelCase ) self.assertEqual(UpperCAmelCase , "[CLS]e è é ê ë[SEP]" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" ) def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[int] = self.perceiver_tokenizer __snake_case : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off __snake_case : Dict = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on __snake_case : Dict = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) if FRAMEWORK != "jax": __snake_case : Any = list(batch.input_ids.numpy()[0] ) else: __snake_case : Dict = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : str = self.perceiver_tokenizer __snake_case : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."] __snake_case : Tuple = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids" , UpperCAmelCase ) self.assertIn("attention_mask" , UpperCAmelCase ) self.assertNotIn("decoder_input_ids" , UpperCAmelCase ) self.assertNotIn("decoder_attention_mask" , UpperCAmelCase ) def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.perceiver_tokenizer __snake_case : Any = [ "Summary of the text.", "Another summary.", ] __snake_case : Any = tokenizer( text_target=UpperCAmelCase , max_length=32 , padding="max_length" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Tuple = tempfile.mkdtemp() __snake_case : Union[str, Any] = " He is very happy, UNwant\u00E9d,running" __snake_case : str = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) tokenizer.save_pretrained(UpperCAmelCase ) __snake_case : Optional[int] = 
tokenizer.__class__.from_pretrained(UpperCAmelCase ) __snake_case : Tuple = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) shutil.rmtree(UpperCAmelCase ) __snake_case : Any = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Any = tempfile.mkdtemp() __snake_case : List[Any] = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) __snake_case : str = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) __snake_case : List[Any] = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) tokenizer.save_pretrained(UpperCAmelCase ) __snake_case : Dict = tokenizer.__class__.from_pretrained(UpperCAmelCase ) __snake_case : Optional[Any] = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : Optional[Any] = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCAmelCase ) def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: __snake_case : List[Any] = json.load(UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: __snake_case : str = json.load(UpperCAmelCase ) __snake_case : Tuple = [F"""<extra_id_{i}>""" for i in range(125 )] __snake_case : Any = added_tokens_extra_ids + [ "an_additional_special_token" ] __snake_case : Union[str, Any] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(UpperCAmelCase , UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(UpperCAmelCase , UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : List[Any] = tokenizer_class.from_pretrained( UpperCAmelCase , ) self.assertIn( "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : List[str] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCAmelCase )] __snake_case : List[str] = tokenizer_class.from_pretrained( UpperCAmelCase , additional_special_tokens=UpperCAmelCase , ) self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' __snake_case : Dict = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , "�" ) def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self ) -> int: '''simple docstring''' pass def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __snake_case : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] __snake_case : Union[str, Any] = tokenizer.convert_tokens_to_string(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
326
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) _UpperCamelCase = logging.getLogger(__name__) def lowerCAmelCase__( lowercase : str ) -> List[str]: __snake_case : int = git.Repo(search_parent_directories=lowercase ) __snake_case : Union[str, Any] = { "repo_id": str(lowercase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(lowercase , "git_log.json" ) , "w" ) as f: json.dump(lowercase , lowercase , indent=4 ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> Optional[Any]: if params.n_gpu <= 0: __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = -1 __snake_case : Union[str, Any] = True __snake_case : Tuple = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 __snake_case : Optional[int] = int(os.environ["WORLD_SIZE"] ) __snake_case : int = int(os.environ["N_GPU_NODE"] ) __snake_case : Union[str, Any] = int(os.environ["RANK"] ) # number of nodes / node ID __snake_case : Optional[Any] = params.world_size // params.n_gpu_per_node __snake_case : Optional[Any] = params.global_rank // params.n_gpu_per_node __snake_case : Union[str, Any] = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 __snake_case : Any = 1 __snake_case : str = 0 __snake_case : Optional[Any] = 0 __snake_case : Dict = 0 __snake_case : int = 1 __snake_case : Optional[Any] = 1 __snake_case : Tuple = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __snake_case : List[Any] = params.node_id == 0 and params.local_rank == 0 __snake_case : List[Any] = params.n_nodes > 1 # summary __snake_case : List[Any] = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
326
1
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import unittest

from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch


class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }

    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76
            ]]),
            torch.tensor([[0, 0, 0, 1069, 11]]),
            torch.tensor([[0, 0, 0, 1069, 11]]),
        ]
        # fmt: on

        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))

    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
            torch.tensor([[
                0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77
            ]]),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
        ]
        # fmt: on

        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
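For orientation: the tokenizer returns one input_ids tensor per Jukebox prior, the full lyric token sequence for the lyric-conditioned top prior and short artist/genre conditioning sequences for the two upsamplers, which is why both tests compare three tensors. A shape-oriented sketch under the same checkpoint assumption as the tests above (it downloads the tokenizer files on first use); this is illustrative, not part of the original suite.

# Sanity-check sketch; asserts shapes rather than hard-coded token values.
import torch
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="Nothing beside remains.")["input_ids"]

assert len(tokens) == 3  # one tensor per prior level
assert all(isinstance(t, torch.Tensor) for t in tokens)
print([t.shape for t in tokens])  # the lyric-level tensor is by far the longest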
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
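Because the shim above only forwards to DPTImageProcessor and raises a FutureWarning, migrating is a one-line rename. A sketch, assuming the Intel/dpt-large checkpoint (any DPT checkpoint works the same way):

# Migration sketch; "Intel/dpt-large" is an assumed (but typical) checkpoint name.
from PIL import Image
from transformers import DPTImageProcessor

image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")

image = Image.new("RGB", (384, 384))  # stand-in for a real input image
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 384, 384])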
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
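In practice these backends are reached through Trainer.hyperparameter_search, which falls back to default_hp_search_backend() when no backend is named and then dispatches to the matching run_hp_search_* helper. A sketch, assuming `trainer` is a transformers.Trainer built with a model_init callback (required so every trial can re-instantiate the model) and that optuna is installed:

# Sketch only: `trainer` is assumed to exist (a transformers.Trainer with model_init=...).
def optuna_hp_space(trial):
    # same shape of dict that default_hp_space_optuna produces
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32]),
    }

best_run = trainer.hyperparameter_search(
    hp_space=optuna_hp_space,
    backend="optuna",   # omit to let default_hp_search_backend() pick one
    direction="minimize",
    n_trials=10,
)
print(best_run.hyperparameters)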