| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 54.1k) | int64 (0 to 699) | string (lengths 111 to 35.6k) | int64 (0 to 699) | int64 (0 or 1) |
"""Accelerate example: fine-tune BERT on GLUE MRPC while automatically finding an executable batch size."""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
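########################################################################
# A rough sketch of how `find_executable_batch_size` behaves (a hedged
# summary; see the accelerate docs for the authoritative description):
# it runs the decorated function with `starting_batch_size`, and each
# time a CUDA out-of-memory error escapes, it frees memory, halves the
# batch size, and retries until the function succeeds:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):   # `batch_size` is injected by the decorator
#       ...
#
#   train()  # note: called with no arguments
########################################################################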
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
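# Example launch commands (illustrative; the script file name here is an assumption,
# see the examples README linked above for the exact invocations):
#   accelerate launch memory.py
#   accelerate launch memory.py --mixed_precision fp16
#   python memory.py --cpu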
"""Convert MaskFormer checkpoints (Swin backbone) from the original repository to the Hugging Face format."""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Builds the MaskFormerConfig (Swin-Tiny backbone + label mapping) matching `model_name`."""
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    config.id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    """Builds the list of (original_key, hf_key) pairs used to rename the state dict."""
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Pops `old` from the state dict and re-inserts its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Splits the fused Swin qkv projections into separate query/key/value weights."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Splits the fused transformer-decoder in_proj weights into q/k/v for self- and cross-attention."""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    """Downloads the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copies and tweaks the original weights into the Hugging Face MaskFormer structure, then verifies the outputs."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
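# Example invocation (illustrative; the script file name and paths are assumptions):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade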
"""Integration tests for the ONNX Stable Diffusion inpainting pipeline."""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
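    # For reference, the two properties above map directly onto ONNX Runtime's
    # session API; a bare session would consume them the same way (illustrative,
    # with a placeholder model path):
    #   session = ort.InferenceSession(
    #       "unet/model.onnx", sess_options=self.gpu_options, providers=[self.gpu_provider]
    #   )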
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler,
            safety_checker=None, feature_extractor=None, provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
"""
Project Euler problem 551: https://projecteuler.net/problem=551

Define an integer sequence by a(0) = 1 and, for n >= 1, a(n) equals
a(n - 1) plus the digit sum of a(n - 1). Find a(10**15).
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which c > 10**k, when terms are written as a(i) = b * 10**k + c.
    Returns the total amount added to the term and the number of terms jumped.
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """Computes terms sequentially until the n-th term or until a carry propagates past digit position k."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds `addend` to the digit array given in `digits`, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence.

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
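
# Illustrative brute-force reference (the helper name is an assumption, not part
# of the original solution): it applies the recurrence directly, adding the digit
# sum of the current term at each step, which is exactly what `solution` computes
# with memoized digit "jumps". Only usable for small n, but handy for sanity
# checks, e.g. solution_naive(10) == solution(10) == 62.
def solution_naive(n: int = 20) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a
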
if __name__ == "__main__":
print(f'''{solution() = }''')
"""Community Stable Diffusion pipeline that reuses a fixed-size reference latent so the same seed yields similar images across resolutions."""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
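
# Illustrative usage (the repo id and custom-pipeline name below are assumptions;
# community pipelines are typically loaded through `DiffusionPipeline.from_pretrained`):
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   ).to("cuda")
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe("a photo of an astronaut", height=512, width=768, generator=generator).images[0]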
"""Fine-tune (and evaluate) sequence-to-sequence models with Seq2SeqTrainer."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs the metrics for `split` and saves them to `{split}_results.json` in `output_dir`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
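# Example invocation (illustrative arguments and paths):
#   python finetune_trainer.py \
#       --model_name_or_path facebook/bart-base \
#       --data_dir ./cnn_dm --output_dir ./output \
#       --do_train --do_eval --task summarization --predict_with_generate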
"""Tests for the Donut image processor."""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = DonutImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A,"do_resize" ) )
self.assertTrue(hasattr(__A,"size" ) )
self.assertTrue(hasattr(__A,"do_thumbnail" ) )
self.assertTrue(hasattr(__A,"do_align_long_axis" ) )
self.assertTrue(hasattr(__A,"do_pad" ) )
self.assertTrue(hasattr(__A,"do_normalize" ) )
self.assertTrue(hasattr(__A,"image_mean" ) )
self.assertTrue(hasattr(__A,"image_std" ) )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"height": 1_8, "width": 2_0} )
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict,size=4_2 )
self.assertEqual(image_processor.size,{"height": 4_2, "width": 4_2} )
# Previous config had dimensions in (width, height) order
_lowerCamelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict,size=(4_2, 8_4) )
self.assertEqual(image_processor.size,{"height": 8_4, "width": 4_2} )
def lowerCamelCase_ ( self : List[Any] ):
pass
@is_flaky()
def lowerCamelCase_ ( self : Dict ):
# Initialize image_processing
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A,Image.Image )
# Test not batched input
_lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
# Test batched
_lowerCamelCase : int = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
@is_flaky()
def lowerCamelCase_ ( self : List[str] ):
# Initialize image_processing
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A,np.ndarray )
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
# Test batched
_lowerCamelCase : List[Any] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
@is_flaky()
def lowerCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
# Test batched
_lowerCamelCase : int = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
),)
| 44 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
        image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,patch_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = BlenderbotSmallTokenizer
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
super().setUp()
_lowerCamelCase : Tuple = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
_lowerCamelCase : int = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
_lowerCamelCase : List[str] = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
_lowerCamelCase : List[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : List[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Tuple,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : List[Any],__A : List[str] ):
_lowerCamelCase : Optional[Any] = "adapt act apte"
_lowerCamelCase : Optional[Any] = "adapt act apte"
return input_text, output_text
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : int = BlenderbotSmallTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Optional[Any] = "adapt act apte"
_lowerCamelCase : Union[str, Any] = ["adapt", "act", "ap@@", "te"]
_lowerCamelCase : Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : List[str] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_lowerCamelCase : Union[str, Any] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
_lowerCamelCase : Optional[int] = "I am a small frog."
_lowerCamelCase : Optional[int] = tok([src_text],padding=__A,truncation=__A )["input_ids"]
_lowerCamelCase : List[str] = tok.batch_decode(__A,skip_special_tokens=__A,clean_up_tokenization_spaces=__A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
_lowerCamelCase : Dict = "I am a small frog ."
_lowerCamelCase : int = "."
_lowerCamelCase : Tuple = tok(__A )["input_ids"]
_lowerCamelCase : int = tok(__A )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 44 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
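# Usage sketch for the knapsack helpers above (all illustrative): in this dump
# every function is emitted as `A_`, so later definitions shadow earlier ones;
# `build_menu` and `greedy` below are assumed stand-ins for the first two
# helpers (menu construction, then greedy selection by a key function).
#
# foods = build_menu(["burger", "pizza", "cola"], [80, 100, 60], [40, 60, 40])
# taken, total_value = greedy(foods, 100, Things.get_value)
# print(taken, total_value)  # items picked greedily by value under weight 100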
| 44 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict,__A : Tuple,__A : str=7,__A : Dict=3,__A : Any=1_8,__A : List[str]=3_0,__A : str=4_0_0,__A : Union[str, Any]=True,__A : Optional[int]=None,__A : List[Any]=True,__A : Optional[int]=None,__A : Dict=True,__A : str=[0.5, 0.5, 0.5],__A : Dict=[0.5, 0.5, 0.5],__A : Any=False,):
_lowerCamelCase : List[str] = size if size is not None else {"height": 2_0, "width": 2_0}
_lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
_lowerCamelCase : str = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : str = image_size
_lowerCamelCase : Tuple = min_resolution
_lowerCamelCase : List[str] = max_resolution
_lowerCamelCase : Tuple = do_resize
_lowerCamelCase : Union[str, Any] = size
_lowerCamelCase : Any = do_center_crop
_lowerCamelCase : str = crop_size
_lowerCamelCase : Dict = do_normalize
_lowerCamelCase : List[str] = image_mean
_lowerCamelCase : Dict = image_std
_lowerCamelCase : Dict = do_reduce_labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_lowerCamelCase : Optional[Any] = Image.open(dataset[0]["file"] )
_lowerCamelCase : Any = Image.open(dataset[1]["file"] )
return image, map
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_lowerCamelCase : Dict = Image.open(ds[0]["file"] )
_lowerCamelCase : Dict = Image.open(ds[1]["file"] )
_lowerCamelCase : Optional[Any] = Image.open(ds[2]["file"] )
_lowerCamelCase : List[str] = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = BeitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A,"do_resize" ) )
self.assertTrue(hasattr(__A,"size" ) )
self.assertTrue(hasattr(__A,"do_center_crop" ) )
self.assertTrue(hasattr(__A,"center_crop" ) )
self.assertTrue(hasattr(__A,"do_normalize" ) )
self.assertTrue(hasattr(__A,"image_mean" ) )
self.assertTrue(hasattr(__A,"image_std" ) )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"height": 2_0, "width": 2_0} )
self.assertEqual(image_processor.crop_size,{"height": 1_8, "width": 1_8} )
self.assertEqual(image_processor.do_reduce_labels,__A )
_lowerCamelCase : str = self.image_processing_class.from_dict(
self.image_processor_dict,size=4_2,crop_size=8_4,reduce_labels=__A )
self.assertEqual(image_processor.size,{"height": 4_2, "width": 4_2} )
self.assertEqual(image_processor.crop_size,{"height": 8_4, "width": 8_4} )
self.assertEqual(image_processor.do_reduce_labels,__A )
def lowerCamelCase_ ( self : int ):
pass
def lowerCamelCase_ ( self : List[Any] ):
# Initialize image_processing
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A,Image.Image )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Optional[int] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
_lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A,np.ndarray )
# Test not batched input
_lowerCamelCase : Tuple = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Optional[int] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : int ):
# Initialize image_processing
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Union[str, Any] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : Dict ):
# Initialize image_processing
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : int = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
_lowerCamelCase : Dict = []
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0],maps[0],return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(
encoding["labels"].shape,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(encoding["labels"].dtype,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test batched
_lowerCamelCase : Tuple = image_processing(__A,__A,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(
encoding["labels"].shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(encoding["labels"].dtype,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
_lowerCamelCase , _lowerCamelCase : int = prepare_semantic_single_inputs()
_lowerCamelCase : Union[str, Any] = image_processing(__A,__A,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(
encoding["labels"].shape,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(encoding["labels"].dtype,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test batched input (PIL images)
_lowerCamelCase , _lowerCamelCase : Tuple = prepare_semantic_batch_inputs()
_lowerCamelCase : List[str] = image_processing(__A,__A,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(
encoding["labels"].shape,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
self.assertEqual(encoding["labels"].dtype,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
def lowerCamelCase_ ( self : Union[str, Any] ):
# Initialize image_processing
_lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_lowerCamelCase , _lowerCamelCase : int = prepare_semantic_single_inputs()
_lowerCamelCase : int = image_processing(__A,__A,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_5_0 )
_lowerCamelCase : Any = True
_lowerCamelCase : List[Any] = image_processing(__A,__A,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
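# A minimal sketch of the lazy-import pattern wired up above (an illustrative
# simplification, not the real `transformers.utils._LazyModule`): attribute
# access resolves the owning submodule from the import structure, imports just
# that submodule, and caches the attribute so the import runs once.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later accesses skip the import
        return value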
| 44 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ : int = 16
UpperCAmelCase_ : Union[str, Any] = 32
def A_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[str] = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCAmelCase : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : str = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCamelCase : Optional[int] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCamelCase : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCamelCase : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCamelCase : List[Any] = 16
elif accelerator.mixed_precision != "no":
_lowerCamelCase : str = 8
else:
_lowerCamelCase : Optional[Any] = None
return tokenizer.pad(
_lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
_lowerCamelCase : List[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
_lowerCamelCase : List[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ : List[Any] = mocked_dataloaders # noqa: F811
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1":
_lowerCamelCase : Union[str, Any] = 2
# Initialize accelerator
_lowerCamelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase : Tuple = config["lr"]
_lowerCamelCase : int = int(config["num_epochs"] )
_lowerCamelCase : Any = int(config["seed"] )
_lowerCamelCase : Tuple = int(config["batch_size"] )
_lowerCamelCase : Optional[int] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
_lowerCamelCase : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCamelCase : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
_lowerCamelCase : Any = MAX_GPU_BATCH_SIZE
set_seed(_lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Optional[int] = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCamelCase : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_lowerCamelCase : Dict = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
_lowerCamelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCamelCase : Any = model(**_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs.loss
_lowerCamelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_lowerCamelCase : Union[str, Any] = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase : Any = model(**_lowerCAmelCase )
_lowerCamelCase : Dict = outputs.logits.argmax(dim=-1 )
_lowerCamelCase , _lowerCamelCase : int = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_lowerCAmelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_lowerCamelCase : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCamelCase : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
_lowerCamelCase : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , _lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_lowerCamelCase : str = parser.parse_args()
_lowerCamelCase : Optional[Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
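# The eval loop above truncates the duplicated samples of the last distributed
# batch by hand. A minimal sketch of the `Accelerator.gather_for_metrics`
# alternative mentioned in the comments, which gathers across processes and
# drops the padded duplicates itself (assumes `accelerator`, `model`,
# `eval_dataloader` and `metric` prepared exactly as in `training_function`):
#
# model.eval()
# for batch in eval_dataloader:
#     with torch.no_grad():
#         outputs = model(**batch)
#     predictions = outputs.logits.argmax(dim=-1)
#     predictions, references = accelerator.gather_for_metrics(
#         (predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)
# print(metric.compute())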
| 44 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
        F'image\n{image_file}\n' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
    _lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x : [int(i) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
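# For reference, the Csv builder forwards `converters` (like `sep` and
# `encoding`) to `pandas.read_csv`, so the list parsing above can be
# reproduced standalone; a minimal sketch assuming only pandas and the same
# fixture path `csv_file_with_int_list`:
#
# import pandas as pd
# df = pd.read_csv(
#     csv_file_with_int_list,
#     converters={"int_list": lambda x: [int(i) for i in x.split()]},
# )
# assert df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]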
| 44 | 1 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCAmelCase_ : Optional[Any] = datasets.logging.get_logger(__name__)
UpperCAmelCase_ : int = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
UpperCAmelCase_ : Optional[Any] = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
UpperCAmelCase_ : Union[str, Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # minimum spans are detected on the gold (key) parse trees
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
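# Hedged worked example (ours, not part of the original metric): the CoNLL score above is
# just the mean of the MUC, B-cubed and CEAFe F1 values rescaled to 0-100. The helper name
# below is hypothetical and only illustrates that arithmetic.
def _conll_score_example(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    # e.g. _conll_score_example(0.70, 0.80, 0.90) -> 80.0
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100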
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
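# Hedged usage sketch (ours): the check above inspects column 6 (index 5) of each
# non-comment CoNLL line; any value other than "-" signals a gold parse annotation.
_example_line = "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)"
assert check_gold_parse_annotation([_example_line]) is True  # column 6 is "(NP*)", not "-"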
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 44 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
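# Hedged side note (ours, not part of the test class above): `get_dummy_inputs` seeds
# differently per device because MPS does not support device-local `torch.Generator`
# objects. A minimal standalone sketch of that branching (the function name is ours):
def _make_generator_sketch(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # falls back to the global CPU RNG on MPS
    return torch.Generator(device=device).manual_seed(seed)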
| 44 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
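# Hedged reconstruction for readability (ours; the classes above carry mangled names): both
# modules implement the same linear map y = a * x + b with learnable parameters, differing
# only in parameter shape. A minimal un-mangled sketch:
class _LinearRegressionSketch(torch.nn.Module):
    def __init__(self, a: float = 0.0, b: float = 0.0):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())

    def forward(self, x):
        return x * self.a + self.b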
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
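# Hedged usage sketch (ours, not part of this module): with `accelerate`, the loaders built
# above are normally passed through `Accelerator.prepare` together with the model and
# optimizer, which moves everything to the right device and shards batches across processes.
def _prepare_for_training_sketch(accelerator, model, optimizer, train_dataloader, eval_dataloader):
    return accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)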
| 44 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
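# Hedged worked example (ours, not from the original script): with the defaults above, the
# character buffer targets seq_length * chars_per_token * num_of_sequences characters before
# tokenizing, i.e. 1024 * 3.6 * 1024 = 3,774,873.6 characters per packed buffer.
_EXAMPLE_INPUT_CHARACTERS = 1024 * 3.6 * 1024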
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
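# Hedged worked example (ours): perplexity is the exponential of the mean cross-entropy
# loss, so a mean eval loss of 2.0 corresponds to exp(2.0) ~= 7.389.
_EXAMPLE_PERPLEXITY = torch.exp(torch.tensor(2.0))  # tensor(7.3891)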
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 44 | 1 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
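    # Hedged usage example (ours): on the sample graphs above, the shortest E -> F
    # distance is 3, via E -> G (cost 2) and G -> F (cost 1).
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected output: 3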
| 44 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
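# Hedged illustration (ours, not library code): `global_attention_mask` is padded with -1
# because 0 already means "local attention" for LED, so -1 must mark padded positions.
_example_global_mask = [0, 0, 1] + [-1] * 2  # right-padded to length 5 -> [0, 0, 1, -1, -1]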
| 44 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
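    # Hedged usage note (ours): wired this way, the parser accepts an optional subcommand,
    # e.g. `accelerate config`, `accelerate config default` or `accelerate config update`.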
| 44 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : int = ""
else:
_lowerCamelCase : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[str] = in_proj_bias[-config.hidden_size :]
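# Hedged illustration (ours): the slicing above splits timm's fused qkv projection, a
# (3 * hidden_size, hidden_size) matrix, into equal query/key/value blocks along dim 0.
def _split_qkv_sketch(in_proj_weight: torch.Tensor, hidden_size: int):
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    return q, k, v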
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ViTConfig()
_lowerCamelCase : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
_lowerCamelCase : str = int(vit_name[-9:-6] )
else:
_lowerCamelCase : List[Any] = 1000
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Any = "imagenet-1k-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = int(vit_name[-6:-4] )
_lowerCamelCase : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
_lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
else:
_lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
if base_model:
_lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 44 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """Returns True if n is a pentagonal number, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns the smallest difference of a pentagonal pair whose sum and
    difference are both pentagonal, or -1 if none is found below the limit."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
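# Hedged worked example (ours): P(5) = 5 * (3 * 5 - 1) / 2 = 35, and the inverse test above
# accepts it because (1 + sqrt(1 + 24 * 35)) / 6 = (1 + 29) / 6 = 5 is an integer.
assert is_pentagonal(35) and not is_pentagonal(36)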
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase_ : int = logging.get_logger(__name__)
class UpperCAmelCase__ ( FeatureExtractionMixin ):
def __init__( self : List[Any],__A : int,__A : int,__A : float,**__A : List[str] ):
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[Any] = sampling_rate
_lowerCamelCase : str = padding_value
_lowerCamelCase : List[Any] = kwargs.pop("padding_side","right" )
_lowerCamelCase : int = kwargs.pop("return_attention_mask",__A )
super().__init__(**__A )
def lowerCamelCase_ ( self : Dict,__A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
],__A : Union[bool, str, PaddingStrategy] = True,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(__A,(list, tuple) ) and isinstance(processed_features[0],(dict, BatchFeature) ):
_lowerCamelCase : Optional[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
_lowerCamelCase : Any = processed_features[self.model_input_names[0]]
_lowerCamelCase : int = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__A ) == 0:
if return_attention_mask:
_lowerCamelCase : Tuple = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_lowerCamelCase : Union[str, Any] = required_input[0]
if isinstance(__A,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_lowerCamelCase : Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__A ):
_lowerCamelCase : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__A ):
_lowerCamelCase : Union[str, Any] = "tf"
elif is_torch_tensor(__A ):
_lowerCamelCase : str = "pt"
elif isinstance(__A,(int, float, list, tuple, np.ndarray) ):
_lowerCamelCase : Tuple = "np"
else:
raise ValueError(
f'type of {first_element} unknown: {type(__A )}. '
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0],(int, float) ):
_lowerCamelCase : List[str] = to_numpy(__A )
else:
_lowerCamelCase : str = [to_numpy(__A ) for v in value]
# Convert padding_strategy in PaddingStrategy
_lowerCamelCase : int = self._get_padding_strategies(padding=__A,max_length=__A )
_lowerCamelCase : Optional[Any] = processed_features[self.model_input_names[0]]
_lowerCamelCase : Union[str, Any] = len(__A )
if not all(len(__A ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
_lowerCamelCase : Optional[Any] = []
for i in range(__A ):
_lowerCamelCase : Dict = {k: v[i] for k, v in processed_features.items()}
# truncation
_lowerCamelCase : Optional[int] = self._truncate(
__A,max_length=__A,pad_to_multiple_of=__A,truncation=__A,)
truncated_inputs.append(__A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_lowerCamelCase : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_lowerCamelCase : Any = PaddingStrategy.MAX_LENGTH
_lowerCamelCase : str = {}
for i in range(__A ):
# padding
_lowerCamelCase : Tuple = self._pad(
truncated_inputs[i],max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
for key, value in outputs.items():
if key not in batch_outputs:
_lowerCamelCase : List[Any] = []
if value.dtype is np.dtype(np.floataa ):
_lowerCamelCase : List[Any] = value.astype(np.floataa )
batch_outputs[key].append(__A )
return BatchFeature(__A,tensor_type=__A )
def lowerCamelCase_ ( self : List[Any],__A : Union[Dict[str, np.ndarray], BatchFeature],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_lowerCamelCase : List[Any] = len(__A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowerCamelCase : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_lowerCamelCase : Tuple = np.ones(len(__A ),dtype=np.intaa )
if needs_to_be_padded:
_lowerCamelCase : List[Any] = max_length - len(__A )
if self.padding_side == "right":
if return_attention_mask:
_lowerCamelCase : Tuple = np.pad(
processed_features["attention_mask"],(0, difference) )
_lowerCamelCase : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_lowerCamelCase : Dict = np.pad(
__A,__A,"constant",constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_lowerCamelCase : int = np.pad(
processed_features["attention_mask"],(difference, 0) )
_lowerCamelCase : Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_lowerCamelCase : Union[str, Any] = np.pad(
__A,__A,"constant",constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def lowerCamelCase_ ( self : int,__A : Union[Dict[str, np.ndarray], BatchFeature],__A : Optional[int] = None,__A : Optional[int] = None,__A : Optional[bool] = None,):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
_lowerCamelCase : int = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCamelCase : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowerCamelCase : Optional[Any] = len(__A ) > max_length
if needs_to_be_truncated:
_lowerCamelCase : List[str] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_lowerCamelCase : Optional[int] = processed_features["attention_mask"][:max_length]
return processed_features
def lowerCamelCase_ ( self : int,__A : List[str]=False,__A : Optional[int]=None ):
# Get padding strategy
if padding is not False:
if padding is True:
_lowerCamelCase : Tuple = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__A,__A ):
_lowerCamelCase : Optional[Any] = PaddingStrategy(__A )
elif isinstance(__A,__A ):
_lowerCamelCase : Tuple = padding
else:
_lowerCamelCase : List[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
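# Hedged standalone sketch (ours, not part of the class): the core of `_pad` for a 1-D
# feature sequence is a right-side constant pad plus a matching attention mask.
def _pad_1d_sketch(values: np.ndarray, max_length: int, padding_value: float = 0.0):
    difference = max_length - len(values)
    attention_mask = np.pad(np.ones(len(values), dtype=np.int32), (0, difference))
    padded = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return padded, attention_mask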
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x_coords = [i[0] for i in self.list_of_points]
        y_coords = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x_coords, y_coords, color="red", label="Control Points")
        plt.legend()
        plt.show()
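# Hedged worked example (ours): for the degree-2 curve through (0, 0), (5, 5) and (5, 0),
# the Bernstein basis at t = 0.5 is [0.25, 0.5, 0.25], so the curve point is
# (0.25*0 + 0.5*5 + 0.25*5, 0.25*0 + 0.5*5 + 0.25*0) = (3.75, 2.5).
assert BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5) == (3.75, 2.5)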
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = KandinskyVaaControlnetImgaImgPipeline
lowerCAmelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCAmelCase_ = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCAmelCase_ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCAmelCase_ = False
@property
def lowerCamelCase_ ( self : Any ):
return 3_2
@property
def lowerCamelCase_ ( self : List[str] ):
return 3_2
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return self.time_input_dim
@property
def lowerCamelCase_ ( self : Any ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
return 1_0_0
@property
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_lowerCamelCase : Any = UNetaDConditionModel(**__A )
return model
@property
def lowerCamelCase_ ( self : Any ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = self.dummy_unet
_lowerCamelCase : int = self.dummy_movq
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_lowerCamelCase : Optional[Any] = DDIMScheduler(**__A )
_lowerCamelCase : Dict = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCamelCase_ ( self : Dict,__A : List[Any],__A : int=0 ):
_lowerCamelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__A )
# create init_image
_lowerCamelCase : Any = floats_tensor((1, 3, 6_4, 6_4),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Optional[Any] = image.cpu().permute(0,2,3,1 )[0]
_lowerCamelCase : List[Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create hint
_lowerCamelCase : str = floats_tensor((1, 3, 6_4, 6_4),rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("mps" ):
_lowerCamelCase : int = torch.manual_seed(__A )
else:
_lowerCamelCase : Union[str, Any] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : Optional[Any] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = "cpu"
_lowerCamelCase : List[Any] = self.get_dummy_components()
_lowerCamelCase : Any = self.pipeline_class(**__A )
_lowerCamelCase : Optional[int] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(__A ) )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : Optional[Any] = pipe(
**self.get_dummy_inputs(__A ),return_dict=__A,)[0]
_lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
_lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCamelCase : Any = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
_lowerCamelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_lowerCamelCase : Any = init_image.resize((5_1_2, 5_1_2) )
_lowerCamelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
_lowerCamelCase : List[str] = torch.from_numpy(np.array(__A ) ).float() / 255.0
_lowerCamelCase : Union[str, Any] = hint.permute(2,0,1 ).unsqueeze(0 )
_lowerCamelCase : int = "A robot, 4k photo"
_lowerCamelCase : Optional[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior",torch_dtype=torch.floataa )
pipe_prior.to(__A )
_lowerCamelCase : str = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth",torch_dtype=torch.floataa )
_lowerCamelCase : Optional[int] = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
_lowerCamelCase , _lowerCamelCase : List[Any] = pipe_prior(
__A,image=__A,strength=0.85,generator=__A,negative_prompt="",).to_tuple()
_lowerCamelCase : Any = pipeline(
image=__A,image_embeds=__A,negative_image_embeds=__A,hint=__A,generator=__A,num_inference_steps=1_0_0,height=5_1_2,width=5_1_2,strength=0.5,output_type="np",)
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__A,__A )
| 44 |
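# A minimal sketch (not from the original test) of the hint preparation used
# in the slow test above: a PIL image becomes a float tensor in [0, 1] with
# shape (1, C, H, W). The blank 64x64 image is a stand-in input.
import numpy as np
import torch
from PIL import Image

img = Image.new("RGB", (64, 64))
hint = torch.from_numpy(np.array(img)).float() / 255.0  # (H, W, C), values in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)  # (1, C, H, W)
print(hint.shape)  # torch.Size([1, 3, 64, 64])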
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44 | 1 |
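# A hedged sketch of the dummy-object pattern above: when optional backends
# are missing, the library exposes placeholders whose constructor raises an
# informative ImportError instead of failing at import time. The class name
# below is hypothetical.
class MidiProcessorStub:
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            f"This class requires the following backends: {', '.join(self._backends)}"
        )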
'''simple docstring'''
import numpy
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : numpy.ndarray,__A : numpy.ndarray ):
_lowerCamelCase : Tuple = input_array
# Random initial weights are assigned: the first argument to
# numpy.random.rand is the number of nodes in the previous layer and
# the second is the number of nodes in the next layer.
# self.input_array.shape[1] is the number of nodes in the input layer;
# the first hidden layer consists of 4 nodes.
_lowerCamelCase : str = numpy.random.rand(
self.input_array.shape[1],4 )
# Random initial weights between the first hidden layer (4 nodes)
# and the second hidden layer (3 nodes).
_lowerCamelCase : str = numpy.random.rand(
4,3 )
# Random initial weights between the second hidden layer (3 nodes)
# and the output layer (1 node).
_lowerCamelCase : List[Any] = numpy.random.rand(3,1 )
# Real output values provided.
_lowerCamelCase : Optional[int] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_lowerCamelCase : Tuple = numpy.zeros(output_array.shape )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = sigmoid(
numpy.dot(self.input_array,self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_lowerCamelCase : Dict = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer,self.first_hidden_layer_and_second_hidden_layer_weights,) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_lowerCamelCase : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer,self.second_hidden_layer_and_output_layer_weights,) )
return self.layer_between_second_hidden_layer_and_output
def lowerCamelCase_ ( self : int ):
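# Back-propagation: the error term 2 * (target - prediction) * sigmoid'(prediction)
# is propagated backwards, multiplying by each layer's weight matrix and the
# sigmoid derivative of its activations, to obtain the update added to every
# weight matrix below.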
_lowerCamelCase : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T,2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ),)
_lowerCamelCase : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T,numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ),self.second_hidden_layer_and_output_layer_weights.T,)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ),)
_lowerCamelCase : Tuple = numpy.dot(
self.input_array.T,numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ),self.second_hidden_layer_and_output_layer_weights.T,)
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ),self.first_hidden_layer_and_second_hidden_layer_weights.T,)
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ),)
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowerCamelCase_ ( self : List[Any],__A : numpy.ndarray,__A : int,__A : bool ):
for iteration in range(1,iterations + 1 ):
_lowerCamelCase : int = self.feedforward()
self.back_propagation()
if give_loss:
_lowerCamelCase : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'Iteration {iteration} Loss: {loss}' )
def lowerCamelCase_ ( self : Optional[int],__A : numpy.ndarray ):
_lowerCamelCase : List[Any] = input_arr
_lowerCamelCase : Optional[Any] = sigmoid(
numpy.dot(self.array,self.input_layer_and_first_hidden_layer_weights ) )
_lowerCamelCase : Dict = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer,self.first_hidden_layer_and_second_hidden_layer_weights,) )
_lowerCamelCase : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer,self.second_hidden_layer_and_output_layer_weights,) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def A_ ( _lowerCAmelCase : numpy.ndarray ):
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def A_ ( _lowerCAmelCase : numpy.ndarray ):
"""simple docstring"""
return (value) * (1 - (value))
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_lowerCamelCase : List[str] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_lowerCamelCase : Tuple = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCAmelCase , output_array=_lowerCAmelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCAmelCase , iterations=10 , give_loss=_lowerCAmelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 44 |
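# A quick numerical check (a sketch, not part of the original module) that the
# sigmoid derivative above is correct when expressed in terms of the sigmoid
# *output*: d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)).
import numpy

def sigmoid(z):
    return 1 / (1 + numpy.exp(-z))

z, h = 0.5, 1e-6
numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)  # central difference
analytic = sigmoid(z) * (1 - sigmoid(z))
assert abs(numeric - analytic) < 1e-8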
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level BPE and get
# both CodeGen and Roberta to work at the same time (mostly an issue of
# adding a space before the string).
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 1 |
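# A plain-string sketch of the behaviour exercised by the slow test above:
# `truncate_before_pattern` cuts generated code at the earliest match of any
# stop pattern. This is an approximation of the tokenizer feature, not the
# library implementation.
import re

def truncate_before_pattern(text, patterns):
    cut = len(text)
    for pattern in patterns:
        match = re.search(pattern, text, flags=re.MULTILINE)
        if match:
            cut = min(cut, match.start())
    return text[:cut]

code = "\nif len_a > len_b: result = a\nelse: result = b\n\n\n\n# trailing comment"
print(truncate_before_pattern(code, [r"^#", r"\n\n\n"]))
# -> "\nif len_a > len_b: result = a\nelse: result = b"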
'''simple docstring'''
import unittest
from transformers import DonutProcessor
UpperCAmelCase_ : List[Any] = 'naver-clova-ix/donut-base'
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = DonutProcessor.from_pretrained(__A )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Tuple = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
_lowerCamelCase : List[str] = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
_lowerCamelCase : Optional[int] = self.processor.tokenajson(__A )
self.assertDictEqual(__A,__A )
| 44 |
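# A toy sketch of what the token-to-JSON conversion tested above does: Donut's
# XML-like tags are parsed into a nested dict, with <sep/> separating list
# items. This regex-based parser only handles the happy path shown in the
# test and is not the library implementation.
import re

TAG = re.compile(r"<s_(.*?)>(.*?)</s_\1>", re.DOTALL)

def parse_donut_sequence(sequence):
    result = {}
    for match in TAG.finditer(sequence):
        key, value = match.group(1), match.group(2)
        if "<s_" in value:  # nested structure, possibly a <sep/>-separated list
            parsed = [parse_donut_sequence(item) for item in value.split("<sep/>")]
            result[key] = parsed if len(parsed) > 1 else parsed[0]
        else:
            result[key] = value
    return result

sequence = "<s_name>John Doe</s_name><s_nicknames><s_nickname>Johnny</s_nickname><sep/><s_nickname>JD</s_nickname></s_nicknames>"
print(parse_donut_sequence(sequence))
# -> {'name': 'John Doe', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}]}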
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 1 |
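# A hedged sketch of what RegressionDataset above generates with its default
# slope and intercept of 2 and 3: y = 2 * x + 3 plus Gaussian noise. An
# ordinary least-squares fit recovers the two parameters.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(64,)).astype(np.float32)
y = 2 * x + 3 + rng.normal(scale=0.1, size=(64,)).astype(np.float32)
slope, intercept = np.polyfit(x, y, deg=1)
print(round(slope, 1), round(intercept, 1))  # ~2.0 ~3.0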
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
UpperCAmelCase_ : List[str] = random.Random()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=1.0 , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[Any]=None ):
"""simple docstring"""
if rng is None:
_lowerCamelCase : List[Any] = global_rng
_lowerCamelCase : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict,__A : List[Any],__A : Any=7,__A : Dict=4_0_0,__A : Union[str, Any]=2_0_0_0,__A : Any=1_0,__A : Dict=1_6_0,__A : List[Any]=8,__A : Optional[int]=0.0,__A : int=4_0_0_0,__A : Dict=False,__A : List[Any]=True,):
_lowerCamelCase : int = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : List[Any] = min_seq_length
_lowerCamelCase : Optional[Any] = max_seq_length
_lowerCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : Optional[int] = padding_value
_lowerCamelCase : str = sampling_rate
_lowerCamelCase : int = return_attention_mask
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : str = feature_size
_lowerCamelCase : Tuple = chunk_length
_lowerCamelCase : List[Any] = hop_length
def lowerCamelCase_ ( self : str ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self : int,__A : str=False,__A : Union[str, Any]=False ):
def _flatten(__A : Tuple ):
return list(itertools.chain(*__A ) )
if equal_length:
_lowerCamelCase : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCamelCase : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
_lowerCamelCase : Dict = [np.asarray(__A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = WhisperFeatureExtractor if is_speech_available() else None
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Tuple = WhisperFeatureExtractionTester(self )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[int] = feat_extract_first.save_pretrained(__A )[0]
check_json_file_has_correct_format(__A )
_lowerCamelCase : Union[str, Any] = self.feature_extraction_class.from_pretrained(__A )
_lowerCamelCase : Optional[int] = feat_extract_first.to_dict()
_lowerCamelCase : Dict = feat_extract_second.to_dict()
_lowerCamelCase : str = feat_extract_first.mel_filters
_lowerCamelCase : str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__A,__A ) )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Any = os.path.join(__A,"feat_extract.json" )
feat_extract_first.to_json_file(__A )
_lowerCamelCase : int = self.feature_extraction_class.from_json_file(__A )
_lowerCamelCase : Tuple = feat_extract_first.to_dict()
_lowerCamelCase : Any = feat_extract_second.to_dict()
_lowerCamelCase : Dict = feat_extract_first.mel_filters
_lowerCamelCase : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__A,__A ) )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
# Tests that all calls wrap to encode_plus and batch_encode_plus.
_lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0,1_4_0_0,2_0_0 )]
_lowerCamelCase : Union[str, Any] = [np.asarray(__A ) for speech_input in speech_inputs]
# Test feature size
_lowerCamelCase : Optional[Any] = feature_extractor(__A,padding="max_length",return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowerCamelCase : Union[str, Any] = feature_extractor(speech_inputs[0],return_tensors="np" ).input_features
_lowerCamelCase : Dict = feature_extractor(np_speech_inputs[0],return_tensors="np" ).input_features
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
# Test batched
_lowerCamelCase : str = feature_extractor(__A,return_tensors="np" ).input_features
_lowerCamelCase : List[Any] = feature_extractor(__A,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__A,__A ):
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase : Dict = np.asarray(__A )
_lowerCamelCase : int = feature_extractor(__A,return_tensors="np" ).input_features
_lowerCamelCase : Any = feature_extractor(__A,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__A,__A ):
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
# Test truncation required
_lowerCamelCase : str = [floats_list((1, x) )[0] for x in range(2_0_0,(feature_extractor.n_samples + 5_0_0),2_0_0 )]
_lowerCamelCase : Dict = [np.asarray(__A ) for speech_input in speech_inputs]
_lowerCamelCase : Union[str, Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCamelCase : Union[str, Any] = [np.asarray(__A ) for speech_input in speech_inputs_truncated]
_lowerCamelCase : Tuple = feature_extractor(__A,return_tensors="np" ).input_features
_lowerCamelCase : str = feature_extractor(__A,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__A,__A ):
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
def lowerCamelCase_ ( self : Any ):
import torch
_lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : str = np.random.rand(1_0_0,3_2 ).astype(np.floataa )
_lowerCamelCase : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase : List[Any] = feature_extractor.pad([{"input_features": inputs}],return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCamelCase : List[str] = feature_extractor.pad([{"input_features": inputs}],return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCamelCase_ ( self : Dict,__A : Optional[Any] ):
_lowerCamelCase : List[Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy","clean",split="validation" )
# automatic decoding with librispeech
_lowerCamelCase : Dict = ds.sort("id" ).select(range(__A ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCamelCase_ ( self : Any ):
# fmt: off
_lowerCamelCase : Optional[int] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_lowerCamelCase : Optional[Any] = self._load_datasamples(1 )
_lowerCamelCase : Union[str, Any] = WhisperFeatureExtractor()
_lowerCamelCase : str = feature_extractor(__A,return_tensors="pt" ).input_features
self.assertEqual(input_features.shape,(1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0],__A,atol=1e-4 ) )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCamelCase : List[Any] = self._load_datasamples(1 )[0]
_lowerCamelCase : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_lowerCamelCase : Any = feat_extract.zero_mean_unit_var_norm([audio],attention_mask=__A )[0]
self.assertTrue(np.all(np.mean(__A ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__A ) - 1 ) < 1e-3 ) )
| 44 |
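# A hedged sketch of the per-utterance normalisation checked by the last test
# above: subtract the mean and divide by the standard deviation, so badly
# scaled audio ends up with mean ~0 and variance ~1. The epsilon guard is an
# assumption, not the library's exact value.
import numpy as np

def zero_mean_unit_var_norm(x, eps=1e-7):
    return (x - x.mean()) / np.sqrt(x.var() + eps)

audio = np.random.rand(16_000).astype(np.float32) * 65_535  # badly scaled input
normed = zero_mean_unit_var_norm(audio)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3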
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# To convert raw PCM bytes to WAV bytes, you at least have to know the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already have the PCM bytes, we don't have to read the file again (just use them!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
| 44 | 1 |
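# A toy sketch of the PCM branch in encode_example above: raw 16-bit PCM
# samples are reinterpreted as int16 and rescaled to float32 in [-1, 1] by
# dividing by 32767, before being re-encoded as WAV.
import numpy as np

pcm_bytes = (np.sin(np.linspace(0, 2 * np.pi, 100)) * 32_767).astype(np.int16).tobytes()
array = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32_767
assert -1.0 <= array.min() and array.max() <= 1.0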
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCAmelCase_ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = state_dict.pop(_lowerCAmelCase )
_lowerCamelCase : List[Any] = val
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCamelCase : List[str] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_lowerCamelCase : Optional[Any] = value
else:
_lowerCamelCase : List[Any] = value
return new_state_dict
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : str = ""
if is_panoptic:
_lowerCamelCase : Any = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[:256, :]
_lowerCamelCase : List[str] = in_proj_bias[:256]
_lowerCamelCase : Any = in_proj_weight[256:512, :]
_lowerCamelCase : Optional[int] = in_proj_bias[256:512]
_lowerCamelCase : Dict = in_proj_weight[-256:, :]
_lowerCamelCase : List[str] = in_proj_bias[-256:]
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_lowerCamelCase : str = "resnet101"
if "dc5" in model_name:
_lowerCamelCase : str = True
_lowerCamelCase : List[Any] = "panoptic" in model_name
if is_panoptic:
_lowerCamelCase : Optional[int] = 250
else:
_lowerCamelCase : Tuple = 91
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Union[str, Any] = "coco-detection-id2label.json"
_lowerCamelCase : str = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Union[str, Any] = idalabel
_lowerCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
# load image processor
_lowerCamelCase : Optional[Any] = "coco_panoptic" if is_panoptic else "coco_detection"
_lowerCamelCase : Dict = ConditionalDetrImageProcessor(format=_lowerCAmelCase )
# prepare image
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Dict = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
logger.info(F'Converting model {model_name}...' )
# load original model from torch hub
_lowerCamelCase : Dict = torch.hub.load("DeppMeng/ConditionalDETR" , _lowerCAmelCase , pretrained=_lowerCAmelCase ).eval()
_lowerCamelCase : str = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_lowerCamelCase : List[str] = "conditional_detr." + src
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Optional[int] = rename_backbone_keys(_lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCAmelCase , is_panoptic=_lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : str = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_lowerCamelCase : Dict = state_dict.pop(_lowerCAmelCase )
_lowerCamelCase : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase : Optional[int] = state_dict.pop(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_lowerCamelCase : str = state_dict.pop(_lowerCAmelCase )
_lowerCamelCase : List[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_lowerCamelCase : Optional[Any] = state_dict.pop(_lowerCAmelCase )
_lowerCamelCase : Tuple = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : str = ConditionalDetrForSegmentation(_lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
model.push_to_hub(repo_id=_lowerCAmelCase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_lowerCamelCase : int = conditional_detr(_lowerCAmelCase )
_lowerCamelCase : List[str] = model(_lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCAmelCase_ : List[str] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 44 |
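# A minimal sketch (with made-up tensors) of the split performed by
# read_in_q_k_v above: PyTorch's MultiheadAttention stores query, key and
# value as one fused (3*d, d) weight and (3*d,) bias, which the HF model
# keeps as three separate projections.
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
in_proj_bias = torch.randn(3 * d)

q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]
assert q_w.shape == (d, d) and q_b.shape == (d,)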
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict( state_dict , codebook_state_dict ):
    """simple docstring"""
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        # chain every rename on the same variable so no substitution is lost
        new_key = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
        new_key = new_key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
        new_key = new_key.replace("heads.cmd.itm_head.cls" , "itm_head" )
        new_key = new_key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
        new_key = new_key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
        new_key = new_key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
        new_key = new_key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
        new_key = new_key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
        new_key = new_key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
        new_key = new_key.replace("image_encoder.module" , "flava.image_model" )
        new_key = new_key.replace("text_encoder.module" , "flava.text_model" )
        new_key = new_key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
        new_key = new_key.replace("mm_encoder.module" , "flava.multimodal_model" )
        new_key = new_key.replace("text_projection" , "flava.text_projection" )
        new_key = new_key.replace("image_projection" , "flava.image_projection" )
        upgrade[new_key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint( checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    """simple docstring"""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location="cpu" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location="cpu" )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
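# Example invocation (hypothetical local paths, for illustration only):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava_converted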
| 44 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
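# Minimal usage sketch for the feature extractor class defined above (a sketch,
# assuming the default 80 mel bins and 16 kHz mono input; the random waveform is
# only a stand-in for real speech):
#   import numpy as np
#   extractor = UpperCAmelCase__()
#   waveform = np.random.randn(16000).astype(np.float32)
#   feats = extractor(waveform, sampling_rate=16000, return_tensors="np")
#   print(feats["input_features"].shape)  # roughly (1, num_frames, 80)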
| 44 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int ):
"""simple docstring"""
if gpta_config_file == "":
_lowerCamelCase : Any = GPTaConfig()
else:
_lowerCamelCase : int = GPTaConfig.from_json_file(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = GPTaModel(_lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
_lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , _lowerCAmelCase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
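# Example invocation (hypothetical checkpoint location, for illustration only):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./models/gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2_converted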
| 44 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    """simple docstring"""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
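# Example invocation (positional arguments; the checkpoint file name must be one of
# ACCEPTABLE_CHECKPOINTS above, and the paths are illustrative):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_coco_pre_trained.th ./visualbert_vqa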
| 44 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Any,__A : Union[str, Any],__A : Optional[int]=7,__A : Optional[Any]=3,__A : List[str]=1_8,__A : Any=3_0,__A : str=4_0_0,__A : str=True,__A : Tuple=None,__A : Tuple=True,__A : List[str]=None,__A : List[str]=True,):
_lowerCamelCase : List[str] = size if size is not None else {"shortest_edge": 2_0}
_lowerCamelCase : str = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : Optional[Any] = min_resolution
_lowerCamelCase : List[Any] = max_resolution
_lowerCamelCase : Any = do_resize
_lowerCamelCase : int = size
_lowerCamelCase : Optional[Any] = do_center_crop
_lowerCamelCase : Optional[Any] = crop_size
_lowerCamelCase : List[Any] = do_flip_channel_order
def lowerCamelCase_ ( self : List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = MobileViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = MobileViTImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A,"do_resize" ) )
self.assertTrue(hasattr(__A,"size" ) )
self.assertTrue(hasattr(__A,"do_center_crop" ) )
self.assertTrue(hasattr(__A,"center_crop" ) )
self.assertTrue(hasattr(__A,"do_flip_channel_order" ) )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size,{"height": 1_8, "width": 1_8} )
_lowerCamelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict,size=4_2,crop_size=8_4 )
self.assertEqual(image_processor.size,{"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size,{"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self : Dict ):
pass
def lowerCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A,Image.Image )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Optional[Any] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : str ):
# Initialize image_processing
_lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A,np.ndarray )
# Test not batched input
_lowerCamelCase : Dict = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Optional[Any] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
def lowerCamelCase_ ( self : Dict ):
# Initialize image_processing
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester,equal_resolution=__A,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A,torch.Tensor )
# Test not batched input
_lowerCamelCase : str = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
# Test batched
_lowerCamelCase : Optional[Any] = image_processing(__A,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
),)
| 44 |
'''simple docstring'''
import functools
def A_ ( days : list[int] , costs : list[int] ):
    """simple docstring"""
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
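# Worked example (the classic "minimum cost for tickets" instance): with
# days=[1, 4, 6, 7, 8, 20] and costs=[2, 7, 15] the optimum is 11, i.e. a 1-day
# pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass on day 20:
#   print(A_([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11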
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
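# Note: routing this module through _LazyModule means `BartphoTokenizer` is imported
# only on first attribute access, so the package can be imported even when
# sentencepiece is not installed (the tokenizer then fails only if actually used).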
| 44 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
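# The fused `qkv` weight popped above is laid out as [query; key; value] along the
# first dimension, which is why it is cut into three contiguous slices of size `dim`.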
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
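# Example invocation (paths are illustrative; the checkpoint must be the original
# pickled MaskFormer state dict):
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer_converted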
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : Dict,__A : Optional[int]=1_3,__A : Optional[Any]=3_0,__A : Any=2,__A : Any=3,__A : List[Any]=True,__A : List[str]=True,__A : List[str]=3_2,__A : List[str]=2,__A : List[Any]=4,__A : int=3_7,__A : Any="gelu",__A : Optional[Any]=0.1,__A : List[Any]=0.1,__A : Optional[int]=1_0,__A : Optional[int]=0.02,__A : Any=3,__A : Optional[Any]=None,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Dict = image_size
_lowerCamelCase : Union[str, Any] = patch_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : int = is_training
_lowerCamelCase : int = use_labels
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : Optional[int] = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Optional[Any] = num_patches + 1
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Tuple ):
return ViTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__A,initializer_range=self.initializer_range,)
def lowerCamelCase_ ( self : Any,__A : Any,__A : Dict,__A : Any ):
_lowerCamelCase : int = TFViTModel(config=__A )
_lowerCamelCase : Dict = model(__A,training=__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_lowerCamelCase : Dict = self.image_size // 2
_lowerCamelCase : Any = pixel_values[:, :, :image_size, :image_size]
_lowerCamelCase : int = model(__A,interpolate_pos_encoding=__A,training=__A )
_lowerCamelCase : int = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any],__A : Any,__A : List[str],__A : Optional[Any] ):
_lowerCamelCase : Optional[int] = self.type_sequence_label_size
_lowerCamelCase : Dict = TFViTForImageClassification(__A )
_lowerCamelCase : Any = model(__A,labels=__A,training=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_lowerCamelCase : Union[str, Any] = self.image_size // 2
_lowerCamelCase : int = pixel_values[:, :, :image_size, :image_size]
_lowerCamelCase : Any = model(__A,interpolate_pos_encoding=__A,training=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : Dict = TFViTForImageClassification(__A )
_lowerCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Any = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = config_and_inputs
_lowerCamelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCAmelCase_ = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = TFViTModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self,config_class=__A,has_text_modality=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__A )
_lowerCamelCase : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCamelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Tuple ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="tf" )
# forward pass
_lowerCamelCase : Any = model(**__A )
# verify the logits
_lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Optional[int] = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3],__A,atol=1e-4 )
| 44 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
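# The solver below advances a Project-Euler-style sequence a(i+1) = a(i) + digitsum(a(i))
# (inferred from the per-step update `addend = ds_c + ds_b` in `compute` above); the
# cached multi-term "jumps" in `next_term` let it reach the 10**15-th term without
# stepping one term at a time.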
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = GPTSanJapaneseTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = {'do_clean_text': False, 'add_prefix_space': False}
def lowerCamelCase_ ( self : Union[str, Any] ):
super().setUp()
# fmt: off
_lowerCamelCase : Dict = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
_lowerCamelCase : Union[str, Any] = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
_lowerCamelCase : int = {"unk_token": "<unk>"}
_lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file,"w" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def lowerCamelCase_ ( self : List[Any],**__A : Optional[int] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : List[str],__A : int ):
_lowerCamelCase : List[Any] = "こんにちは、世界。 \nこんばんは、㔺界。😀"
_lowerCamelCase : Optional[int] = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def lowerCamelCase_ ( self : Dict,__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : int = self.get_input_output_texts(__A )
_lowerCamelCase : Optional[Any] = tokenizer.encode(__A,add_special_tokens=__A )
_lowerCamelCase : int = tokenizer.decode(__A,clean_up_tokenization_spaces=__A )
return text, ids
def lowerCamelCase_ ( self : int ):
pass # TODO add if relevant
def lowerCamelCase_ ( self : Tuple ):
pass # TODO add if relevant
def lowerCamelCase_ ( self : Dict ):
pass # TODO add if relevant
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
# Testing tokenization
_lowerCamelCase : Union[str, Any] = "こんにちは、世界。 こんばんは、㔺界。"
_lowerCamelCase : List[Any] = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
_lowerCamelCase : str = tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : Optional[Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
_lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A,__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = self.get_tokenizer()
# Testing tokenization
_lowerCamelCase : Optional[Any] = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
_lowerCamelCase : Tuple = "こんにちは、、、、世界。こんばんは、、、、世界。"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : int = tokenizer.decode(__A )
self.assertEqual(__A,__A )
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_lowerCamelCase : Optional[Any] = "こんにちは、世界。"
_lowerCamelCase : List[str] = "こんばんは、㔺界。😀"
_lowerCamelCase : str = "こんにちは、世界。こんばんは、世界。😀"
_lowerCamelCase : Tuple = tokenizer.encode(prefix_text + input_text )
_lowerCamelCase : List[Any] = tokenizer.encode("",prefix_text=prefix_text + input_text )
_lowerCamelCase : Optional[Any] = tokenizer.encode(__A,prefix_text=__A )
_lowerCamelCase : Optional[Any] = tokenizer.decode(__A )
_lowerCamelCase : List[str] = tokenizer.decode(__A )
_lowerCamelCase : Dict = tokenizer.decode(__A )
self.assertEqual(__A,__A )
self.assertEqual(__A,__A )
self.assertEqual(__A,__A )
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_lowerCamelCase : Optional[int] = "こんにちは、世界。"
_lowerCamelCase : Optional[Any] = "こんばんは、㔺界。😀"
_lowerCamelCase : Any = len(tokenizer.encode(__A ) ) - 2
_lowerCamelCase : Dict = len(tokenizer.encode(__A ) ) - 2
_lowerCamelCase : int = [1] + [0] * (len_prefix + len_text + 1)
_lowerCamelCase : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0]
_lowerCamelCase : List[str] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_lowerCamelCase : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids
_lowerCamelCase : List[Any] = tokenizer("",prefix_text=prefix_text + input_text ).token_type_ids
_lowerCamelCase : str = tokenizer(__A,prefix_text=__A ).token_type_ids
self.assertListEqual(__A,__A )
self.assertListEqual(__A,__A )
self.assertListEqual(__A,__A )
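# Illustrative note (lengths assumed for the sake of the example): GPTSAN-japanese
# marks the prefix segment with token_type_id 1 and the generation segment with 0,
# e.g. for len_prefix=5 and len_text=6 the third pattern above is
#   [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
# so the model can attend bidirectionally over the prefix and causally over the rest.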
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_lowerCamelCase : Any = tokenizer.encode("あンいワ" )
_lowerCamelCase : List[Any] = tokenizer.encode("",prefix_text="あンいワ" )
_lowerCamelCase : str = tokenizer.encode("いワ",prefix_text="あン" )
self.assertEqual(tokenizer.decode(__A ),tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ),tokenizer.decode(__A ) )
self.assertNotEqual(__A,__A )
self.assertNotEqual(__A,__A )
self.assertEqual(x_token_a[1],x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1],x_token_a[3] ) # SEG token
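# Illustrative note: passing prefix_text inserts a SEG separator token between the
# prefix and the continuation, so all three encodings above decode to the same
# string while their raw id sequences differ only in where the SEG token sits.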
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_lowerCamelCase : str = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
_lowerCamelCase : List[Any] = tokenizer(__A,padding=__A )
_lowerCamelCase : str = tokenizer.batch_encode_plus(__A,padding=__A )
# fmt: off
_lowerCamelCase : Dict = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
_lowerCamelCase : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_lowerCamelCase : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids,__A )
self.assertListEqual(x_token.token_type_ids,__A )
self.assertListEqual(x_token.attention_mask,__A )
self.assertListEqual(x_token_a.input_ids,__A )
self.assertListEqual(x_token_a.token_type_ids,__A )
self.assertListEqual(x_token_a.attention_mask,__A )
def lowerCamelCase_ ( self : Any ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase_ ( self : Any ):
# tokenizer has no padding token
pass
| 44 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
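# The loop above copies dropout-style hyperparameters (encoder_layerdrop,
# decoder_layerdrop, dropout, attention_dropout) from the parsed training args onto
# the model config, so CLI overrides reach the model at load time.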
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
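# mBART decoding must start from the target-language code token (e.g. "ro_RO"),
# which is why the language id resolved above is assigned as the model's
# decoder_start_token_id.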
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
UpperCAmelCase_ : List[str] = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : Any = {
'RUCAIBox/mvp': 1024,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = MvpTokenizer
def __init__( self : Any,__A : Tuple=None,__A : str=None,__A : List[Any]=None,__A : Union[str, Any]="replace",__A : Union[str, Any]="<s>",__A : Optional[int]="</s>",__A : List[str]="</s>",__A : Any="<s>",__A : Dict="<unk>",__A : Union[str, Any]="<pad>",__A : Optional[int]="<mask>",__A : List[str]=False,__A : str=True,**__A : str,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : Optional[int] = add_prefix_space
_lowerCamelCase : List[str] = pre_tok_class(**__A )
_lowerCamelCase : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[int] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Optional[int] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : str = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Tuple = True
if changes_to_apply:
_lowerCamelCase : Dict = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Dict = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
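# Summary of the block above: the serialized fast tokenizer stores
# add_prefix_space/trim_offsets inside its post-processor state, so when the
# constructor arguments disagree with that state the post-processor is rebuilt
# from the patched state and re-attached to the backend tokenizer.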
@property
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : List[str] ):
_lowerCamelCase : Optional[int] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : Optional[Any] = value
def lowerCamelCase_ ( self : List[Any],*__A : Dict,**__A : int ):
_lowerCamelCase : Any = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Union[str, Any],*__A : Optional[int],**__A : Dict ):
_lowerCamelCase : Any = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : int,__A : Optional[Any]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
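# Resulting formats (BART-style, as implemented above):
#   single sequence: <s> A </s>
#   pair of sequences: <s> A </s> </s> B </s>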
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
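# MVP, like BART/RoBERTa, does not use token type ids; this method only returns a
# zero vector of the correct length for API compatibility.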
| 44 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,patch_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
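# Worked example with the tester defaults above (image_size=32, patch_size=2,
# embed_dim=16, depths=[1, 2, 1]): (32 // 2) ** 2 = 256 patches, two 4x
# downsamplings give 256 // 16 = 16 tokens, and the final dim is 16 * 2 ** 2 = 64.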
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
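# reshaped_hidden_states are returned as (batch, channels, height, width); the
# view/permute above flattens the spatial grid back to (batch, seq_len, channels)
# so it can be compared against the regular hidden_states layout.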
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'funnel'
lowerCAmelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self : Optional[Any],__A : int=3_0_5_2_2,__A : Dict=[4, 4, 4],__A : int=None,__A : Union[str, Any]=2,__A : int=7_6_8,__A : str=1_2,__A : Dict=6_4,__A : int=3_0_7_2,__A : Optional[int]="gelu_new",__A : Union[str, Any]=0.1,__A : List[Any]=0.1,__A : List[str]=0.0,__A : int=0.1,__A : int=None,__A : Optional[int]=1e-9,__A : str="mean",__A : List[str]="relative_shift",__A : List[str]=True,__A : Optional[Any]=True,__A : Dict=True,**__A : Dict,):
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Any = block_sizes
_lowerCamelCase : List[str] = [1] * len(__A ) if block_repeats is None else block_repeats
assert len(__A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_lowerCamelCase : Optional[Any] = num_decoder_layers
_lowerCamelCase : Dict = d_model
_lowerCamelCase : List[Any] = n_head
_lowerCamelCase : Tuple = d_head
_lowerCamelCase : List[str] = d_inner
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Tuple = hidden_dropout
_lowerCamelCase : Dict = attention_dropout
_lowerCamelCase : Tuple = activation_dropout
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : str = initializer_std
_lowerCamelCase : str = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
_lowerCamelCase : Dict = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
_lowerCamelCase : List[str] = attention_type
_lowerCamelCase : Dict = separate_cls
_lowerCamelCase : int = truncate_seq
_lowerCamelCase : List[str] = pool_q_only
super().__init__(**__A )
@property
def lowerCamelCase_ ( self : str ):
return sum(self.block_sizes )
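# e.g. with the default block_sizes [4, 4, 4] this property reports 12 hidden
# layers; the setter below is intentionally disabled because depth is derived
# from block_sizes.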
@num_hidden_layers.setter
def lowerCamelCase_ ( self : Any,__A : int ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def lowerCamelCase_ ( self : Tuple ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int] ):
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 44 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
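# Illustrative usage (names assumed; the obfuscated module reuses `A_` for every
# top-level function, so the original helpers are referred to conceptually):
#   menu = build_menu(["burger", "salad"], [80, 40], [40, 10])
#   taken, value = greedy(menu, 50, Things.get_value)  # greedy by value, max_cost=50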
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : Dict,__A : Tuple=2,__A : int=True,__A : List[Any]=False,__A : Optional[Any]=1_0,__A : List[Any]=3,__A : int=3_2 * 8,__A : Optional[int]=3_2 * 8,__A : str=4,__A : List[Any]=6_4,):
_lowerCamelCase : str = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : Dict = use_auxiliary_loss
_lowerCamelCase : Optional[Any] = num_queries
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : str = min_size
_lowerCamelCase : int = max_size
_lowerCamelCase : Union[str, Any] = num_labels
_lowerCamelCase : Any = hidden_dim
_lowerCamelCase : Any = hidden_dim
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__A )
_lowerCamelCase : int = torch.ones([self.batch_size, self.min_size, self.max_size],device=__A )
_lowerCamelCase : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size],device=__A ) > 0.5
).float()
_lowerCamelCase : int = (torch.rand((self.batch_size, self.num_labels),device=__A ) > 0.5).long()
_lowerCamelCase : str = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Union[str, Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim,)
_lowerCamelCase : Any = self.num_queries
_lowerCamelCase : Union[str, Any] = self.num_labels
_lowerCamelCase : Optional[Any] = [1, 1, 1, 1]
_lowerCamelCase : Optional[int] = self.num_channels
_lowerCamelCase : Dict = 6_4
_lowerCamelCase : List[Any] = 1_2_8
_lowerCamelCase : List[Any] = self.hidden_dim
_lowerCamelCase : int = self.hidden_dim
_lowerCamelCase : Tuple = self.hidden_dim
return config
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : int,__A : Dict,__A : List[str] ):
_lowerCamelCase : Any = output.encoder_hidden_states
_lowerCamelCase : Any = output.pixel_decoder_hidden_states
_lowerCamelCase : int = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(__A ),len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__A ),len(config.backbone_config.depths ) )
self.parent.assertEqual(len(__A ),config.decoder_layers )
def lowerCamelCase_ ( self : Optional[Any],__A : Dict,__A : Union[str, Any],__A : Any,__A : List[Any]=False ):
with torch.no_grad():
_lowerCamelCase : Any = MaskaFormerModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(pixel_values=__A,pixel_mask=__A )
_lowerCamelCase : Any = model(__A,output_hidden_states=__A )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape,(self.batch_size, self.num_queries, self.hidden_dim),)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__A,__A )
def lowerCamelCase_ ( self : Tuple,__A : Optional[int],__A : List[Any],__A : str,__A : int,__A : List[str] ):
_lowerCamelCase : Union[str, Any] = MaskaFormerForUniversalSegmentation(config=__A )
model.to(__A )
model.eval()
def comm_check_on_output(__A : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape,(self.batch_size, self.num_queries, self.num_labels + 1) )
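# With the tester defaults (min_size = max_size = 32 * 8 = 256) the mask logits are
# (batch, num_queries, 64, 64) after the 4x encoder compression, and the class
# logits carry num_labels + 1 columns, the extra one being the "no object" class.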
with torch.no_grad():
_lowerCamelCase : int = model(pixel_values=__A,pixel_mask=__A )
_lowerCamelCase : Union[str, Any] = model(__A )
comm_check_on_output(__A )
_lowerCamelCase : int = model(
pixel_values=__A,pixel_mask=__A,mask_labels=__A,class_labels=__A )
comm_check_on_output(__A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape,torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase_ = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[Any] = MaskaFormerModelTester(self )
_lowerCamelCase : Any = ConfigTester(self,config_class=__A,has_text_modality=__A )
def lowerCamelCase_ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__A,**__A,output_hidden_states=__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__A )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCamelCase_ ( self : int ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCamelCase_ ( self : Dict ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCamelCase_ ( self : int ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : Tuple ):
pass
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Tuple = model_class(__A )
_lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : List[Any] = [*signature.parameters.keys()]
_lowerCamelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
@slow
def lowerCamelCase_ ( self : List[str] ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = (self.model_tester.min_size,) * 2
_lowerCamelCase : Union[str, Any] = {
"pixel_values": torch.randn((2, 3, *size),device=__A ),
"mask_labels": torch.randn((2, 1_0, *size),device=__A ),
"class_labels": torch.zeros(2,1_0,device=__A ).long(),
}
_lowerCamelCase : List[str] = self.model_tester.get_config()
_lowerCamelCase : Any = MaskaFormerForUniversalSegmentation(__A ).to(__A )
_lowerCamelCase : str = model(**__A )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__A,**__A,output_hidden_states=__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__A ).to(__A )
_lowerCamelCase : List[str] = model(**__A,output_attentions=__A )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase_ ( self : List[str] ):
if not self.model_tester.is_training:
return
_lowerCamelCase : str = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Optional[Any] = model_class(__A )
model.to(__A )
model.train()
_lowerCamelCase : Optional[int] = model(__A,mask_labels=__A,class_labels=__A ).loss
loss.backward()
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[int] = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = True
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : str = model_class(__A ).to(__A )
model.train()
_lowerCamelCase : str = model(__A,mask_labels=__A,class_labels=__A )
_lowerCamelCase : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : List[str] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
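# retain_grad() is required above because these are non-leaf tensors whose .grad
# would otherwise be freed during backward; retain_graph=True keeps the graph
# alive so all four gradients can be inspected after a single backward pass.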
UpperCAmelCase_ : List[str] = 1E-4
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Optional[int] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCamelCase_ ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : str = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__A )
_lowerCamelCase : Union[str, Any] = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : List[str] = image_processor(__A,return_tensors="pt" ).to(__A )
_lowerCamelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__A,(1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**__A )
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3],__A,atol=__A ) )
_lowerCamelCase : str = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3],__A,atol=__A ) )
_lowerCamelCase : Optional[int] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3],__A,atol=__A ) )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__A ).eval()
_lowerCamelCase : Union[str, Any] = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : Union[str, Any] = image_processor(__A,return_tensors="pt" ).to(__A )
_lowerCamelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__A,(1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
_lowerCamelCase : Tuple = model(**__A )
# masks_queries_logits
_lowerCamelCase : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_lowerCamelCase : Optional[Any] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_lowerCamelCase : Optional[int] = torch.tensor(__A ).to(__A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3],__A,atol=__A ) )
# class_queries_logits
_lowerCamelCase : Tuple = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape,(1, model.config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Optional[int] = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3],__A,atol=__A ) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__A ).eval()
_lowerCamelCase : Dict = self.default_image_processor
_lowerCamelCase : Tuple = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )],segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )],return_tensors="pt",)
_lowerCamelCase : Any = inputs["pixel_values"].to(__A )
_lowerCamelCase : Optional[Any] = [el.to(__A ) for el in inputs["mask_labels"]]
_lowerCamelCase : Optional[Any] = [el.to(__A ) for el in inputs["class_labels"]]
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__A )
self.assertTrue(outputs.loss is not None )
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
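# A brief note on the lazy-import pattern above (a reading of the code, not an
# addition to its API): at import time only `_import_structure` -- a map from
# submodule name to exported symbols -- is registered, and the module object in
# sys.modules is replaced by a `_LazyModule` that imports each submodule the
# first time one of its attributes is accessed. The `TYPE_CHECKING` branch does
# the real imports so static type checkers still see the concrete classes.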
| 44 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A ):
def __init__( self : Tuple,__A : Optional[Any],__A : int ):
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowerCamelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__A,scheduler=__A )
@torch.no_grad()
def __call__( self : Tuple,__A : int = 1,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : float = 0.0,__A : int = 5_0,__A : Optional[bool] = None,__A : Optional[str] = "pil",__A : bool = True,):
# Sample Gaussian noise to begin the loop
if isinstance(self.unet.config.sample_size,__A ):
_lowerCamelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowerCamelCase : int = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__A,__A ) and len(__A ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(__A )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_lowerCamelCase : Tuple = randn_tensor(__A,generator=__A,device=self.device,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCamelCase : Any = self.unet(__A,__A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should lie in [0, 1]
# do x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(
__A,__A,__A,eta=__A,use_clipped_model_output=__A,generator=__A ).prev_sample
_lowerCamelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0,1 )
_lowerCamelCase : Tuple = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
_lowerCamelCase : Any = self.numpy_to_pil(__A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__A )
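# A minimal usage sketch for the pipeline above; the checkpoint name is an
# assumption chosen for illustration ("google/ddpm-cifar10-32" ships a UNet and
# a DDPM scheduler, which `DDIMScheduler.from_config` converts as in __init__):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   # eta=0.0 gives deterministic DDIM sampling; larger eta injects DDPM-style noise
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")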
| 44 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x : [int(i ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
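# A note on the `converters` hook exercised above (a hedged reading of the
# packaged Csv builder, not new API): the keyword is forwarded to
# `pandas.read_csv`, so each raw cell such as "1 2 3" passes through the lambda
# and lands in the Arrow table as the list [1, 2, 3].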
| 44 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
_lowerCamelCase : Optional[int] = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
_lowerCamelCase : Optional[Any] = parser.parse_args()
if not hasattr(_lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
_lowerCamelCase : Optional[Any] = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 44 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(_lowerCAmelCase , 2 ) - pow(_lowerCAmelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(_lowerCAmelCase , 2 ) - pow(_lowerCAmelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(_lowerCAmelCase , 2 ) + pow(_lowerCAmelCase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
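# Worked example of the branches above, using the relation Z^2 = R^2 + X^2:
# with resistance=0, reactance=3 and impedance=5 the function returns
# {"resistance": 4.0}, since sqrt(5**2 - 3**2) == 4.0; supplying zero (or more
# than one) zero-valued argument trips the "One and only one argument must be
# 0" guard instead.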
| 44 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : Tuple,__A : Optional[int],__A : Optional[int]=1_0_2_4,__A : int=1_0_2_4,__A : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
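# How the packing above behaves on a toy input (token ids invented for
# illustration): with concat_token_id = 0 and seq_length = 4, tokenized texts
# [[5, 6], [7, 8, 9]] are flattened to [5, 6, 0, 7, 8, 9, 0]; the loop yields
# only the full chunk tensor([5, 6, 0, 7]) and drops the 3-token tail, because
# chunks shorter than seq_length are never emitted.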
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
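# Note on the numbers logged above: perplexity is the exponential of the mean
# cross-entropy loss, PPL = exp(L). A tiny self-contained check (the loss
# values are invented for illustration):
#
#   import torch
#   losses = torch.tensor([2.0, 2.2, 1.8])   # per-batch mean losses
#   ppl = torch.exp(losses.mean())           # mean is 2.0, so ppl ~ 7.389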
| 44 | 1 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Dict = int(_lowerCAmelCase )
if n_element < 1:
_lowerCamelCase : Any = ValueError("n_element should be a positive number" )
raise my_error
_lowerCamelCase : List[Any] = [1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = (0, 0, 0)
_lowerCamelCase : Tuple = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
UpperCAmelCase_ : Dict = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
UpperCAmelCase_ : Dict = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
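# Trace of the three-pointer merge above for n_element = 5: starting from [1],
# each step first advances any pointer whose candidate 2*h[i], 3*h[j] or 5*h[k]
# is already <= the last element, then appends the minimum of the three
# candidates, producing 2, 3, 4, 5 in turn; the returned [1, 2, 3, 4, 5] are
# the smallest numbers of the form 2^a * 3^b * 5^c.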
| 44 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
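# Worked illustration of the padding rule above (values invented): with
# padding_side == "right" and the inputs padded from length 3 to 5, a
# global_attention_mask of [1, 0, 0] becomes [1, 0, 0, -1, -1]; the pad slots
# get -1 rather than 0 because, for LED, 0 already means "local attention".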
| 44 | 1 |
'''simple docstring'''
from ....utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : Optional[int],__A : int,__A : List[Any]=None,__A : Any=2_0_4_8 ):
_lowerCamelCase : List[str] = config.__dict__
_lowerCamelCase : Union[str, Any] = modal_hidden_size
if num_labels:
_lowerCamelCase : List[Any] = num_labels
| 44 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : int = ""
else:
_lowerCamelCase : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[str] = in_proj_bias[-config.hidden_size :]
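# A de-obfuscated sketch of the slicing above (hidden size 4 is an arbitrary
# illustration; timm stores q, k and v stacked along dim 0 of one fused
# (3 * hidden, hidden) matrix):
#
#   import torch
#   hidden = 4
#   in_proj_weight = torch.randn(3 * hidden, hidden)
#   q_w = in_proj_weight[:hidden, :]
#   k_w = in_proj_weight[hidden : 2 * hidden, :]
#   v_w = in_proj_weight[-hidden:, :]
#   assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)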
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ViTConfig()
_lowerCamelCase : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
_lowerCamelCase : str = int(vit_name[-9:-6] )
else:
_lowerCamelCase : List[Any] = 1000
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Any = "imagenet-1k-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = int(vit_name[-6:-4] )
_lowerCamelCase : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
_lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
else:
_lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
if base_model:
_lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
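# Worked example of the branches above (the argument values are arbitrary):
# calling with stress=0, tangential_force=25, area=100 returns
# ("stress", 0.25), i.e. tau = F / A; calling with stress=25,
# tangential_force=0, area=100 returns ("tangential_force", 2500.0), i.e.
# F = tau * A.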
| 44 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A_ ( _lowerCAmelCase : int = 5000 ):
"""simple docstring"""
_lowerCamelCase : Dict = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCAmelCase )]
for i, pentagonal_i in enumerate(_lowerCAmelCase ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : List[Any] = pentagonal_nums[j]
_lowerCamelCase : Any = pentagonal_i + pentagonal_j
_lowerCamelCase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCAmelCase ) and is_pentagonal(_lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
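# Why the is_pentagonal test above works: P(n) = n(3n - 1) / 2, so solving
# 3n^2 - n - 2x = 0 for n gives n = (1 + sqrt(1 + 24x)) / 6; x is pentagonal
# exactly when that n is a positive integer, which is what the
# ((1 + root) / 6) % 1 == 0 check verifies.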
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
if len(_lowerCAmelCase ) == 0:
return []
_lowerCamelCase , _lowerCamelCase : int = min(_lowerCAmelCase ), max(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = int(max_value - min_value ) + 1
_lowerCamelCase : list[list] = [[] for _ in range(_lowerCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_lowerCAmelCase )
return [v for bucket in buckets for v in sorted(_lowerCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
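# Worked example of the pass above: for [3, 1, 2] the value range is 1..3, so
# three unit-width buckets are filled as [[1], [2], [3]] and concatenated (each
# bucket sorted) into [1, 2, 3]; the pass itself is O(n + k) for k buckets,
# plus the cost of sorting each bucket.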
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : List[Any] = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 1 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ):
"""simple docstring"""
if index == len(_lowerCAmelCase ):
return True
# Recursive Step
for i in range(_lowerCAmelCase ):
if valid_coloring(graph[index] , _lowerCAmelCase , _lowerCAmelCase ):
# Color current vertex
_lowerCamelCase : Dict = i
# Validate coloring
if util_color(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , index + 1 ):
return True
# Backtrack
_lowerCamelCase : List[Any] = -1
return False
def A_ ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [-1] * len(_lowerCAmelCase )
if util_color(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 0 ):
return colored_vertices
return []
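# Usage sketch for the backtracking coloring above (the triangle adjacency
# matrix below is an invented example):
#
#   graph = [[0, 1, 1],
#            [1, 0, 1],
#            [1, 1, 0]]
#
# With three colors a valid assignment such as [0, 1, 2] is returned; with two
# colors the result is [] -- a triangle is not 2-colorable.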
| 44 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : list[tuple[float, float]] ):
_lowerCamelCase : Tuple = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
_lowerCamelCase : int = len(__A ) - 1
def lowerCamelCase_ ( self : Optional[int],__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : list[float] = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree,__A ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__A ),5 ) == 1
return output_values
def lowerCamelCase_ ( self : int,__A : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
_lowerCamelCase : List[Any] = self.basis_function(__A )
_lowerCamelCase : str = 0.0
_lowerCamelCase : str = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCamelCase_ ( self : Optional[Any],__A : float = 0.01 ):
from matplotlib import pyplot as plt # type: ignore
_lowerCamelCase : list[float] = [] # x coordinates of points to plot
_lowerCamelCase : list[float] = [] # y coordinates of points to plot
_lowerCamelCase : Tuple = 0.0
while t <= 1:
_lowerCamelCase : str = self.bezier_curve_function(__A )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
_lowerCamelCase : List[str] = [i[0] for i in self.list_of_points]
_lowerCamelCase : Union[str, Any] = [i[1] for i in self.list_of_points]
plt.plot(
__A,__A,color="blue",label="Curve of Degree " + str(self.degree ),)
plt.scatter(__A,__A,color="red",label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
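# The basis_function above evaluates the Bernstein polynomials
# b(i, n)(t) = C(n, i) * (1 - t)^(n - i) * t^i. For degree 1 at t = 0.5 this
# gives [0.5, 0.5], so the curve point is the midpoint of the two control
# points -- degree 1 is exactly linear interpolation, as the first plot shows.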
| 44 | 1 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = len(_lowerCAmelCase )
_lowerCamelCase : Dict = sum(_lowerCAmelCase )
_lowerCamelCase : List[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowerCamelCase : Optional[Any] = True
for i in range(1 , s + 1 ):
_lowerCamelCase : Optional[int] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowerCamelCase : Tuple = dp[i][j - 1]
if arr[i - 1] <= j:
_lowerCamelCase : Optional[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowerCamelCase : int = s - 2 * j
break
return diff
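# Worked example of the DP above: for arr = [1, 6, 11, 5], s = 23; the largest
# reachable subset sum j <= s // 2 = 11 is 11 (e.g. {6, 5} or {11}), so the
# minimum partition difference returned is s - 2 * j = 1.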
| 44 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['transformers', 'torch', 'note_seq']
def __init__( self : str,*__A : List[str],**__A : List[Any] ):
requires_backends(self,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : str,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase_ ( cls : Dict,*__A : Dict,**__A : Tuple ):
requires_backends(cls,["transformers", "torch", "note_seq"] )
| 44 | 1 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase__ :
def __init__( self : List[str] ):
_lowerCamelCase : Optional[Any] = {}
def lowerCamelCase_ ( self : Optional[int],__A : int,__A : Optional[Any],__A : Optional[int]=1 ):
if self.graph.get(__A ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_lowerCamelCase : Tuple = [[w, v]]
if not self.graph.get(__A ):
_lowerCamelCase : Tuple = []
def lowerCamelCase_ ( self : Optional[int] ):
return list(self.graph )
def lowerCamelCase_ ( self : List[str],__A : Any,__A : Union[str, Any] ):
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
def lowerCamelCase_ ( self : int,__A : Any=-2,__A : Optional[int]=-1 ):
if s == d:
return []
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : int = []
if s == -2:
_lowerCamelCase : str = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_lowerCamelCase : Union[str, Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCamelCase : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowerCamelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
_lowerCamelCase : List[str] = stack[len(__A ) - 1]
else:
_lowerCamelCase : int = ss
# check if we have reached the starting point
if len(__A ) == 0:
return visited
def lowerCamelCase_ ( self : Union[str, Any],__A : Optional[Any]=-1 ):
if c == -1:
_lowerCamelCase : List[str] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowerCamelCase : Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(__A,__A,1 )
def lowerCamelCase_ ( self : Union[str, Any],__A : Dict=-2 ):
_lowerCamelCase : Tuple = deque()
_lowerCamelCase : Any = []
if s == -2:
_lowerCamelCase : Optional[Any] = list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
_lowerCamelCase : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase_ ( self : Dict,__A : List[str] ):
_lowerCamelCase : int = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCamelCase_ ( self : Optional[Any],__A : Dict ):
return len(self.graph[u] )
def lowerCamelCase_ ( self : Optional[int],__A : Dict=-2 ):
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = []
if s == -2:
_lowerCamelCase : Tuple = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_lowerCamelCase : Union[str, Any] = s
_lowerCamelCase : Optional[int] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCamelCase : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCamelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__A ) != 0:
_lowerCamelCase : Tuple = stack[len(__A ) - 1]
else:
_lowerCamelCase : List[str] = ss
# check if we have reached the starting point
if len(__A ) == 0:
return sorted_nodes
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[Any] = []
_lowerCamelCase : str = []
_lowerCamelCase : Tuple = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_lowerCamelCase : int = -2
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = s
_lowerCamelCase : str = False
_lowerCamelCase : Optional[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCamelCase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCamelCase : List[Any] = len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCamelCase : str = True
if len(__A ) != 0:
_lowerCamelCase : Union[str, Any] = stack[len(__A ) - 1]
else:
_lowerCamelCase : int = False
indirect_parents.append(__A )
_lowerCamelCase : int = s
_lowerCamelCase : Dict = ss
# check if we have reached the starting point
if len(__A ) == 0:
return list(__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : List[Any] = []
_lowerCamelCase : Any = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_lowerCamelCase : List[str] = -2
_lowerCamelCase : Dict = []
_lowerCamelCase : Dict = s
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCamelCase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCamelCase : List[Any] = len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCamelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCamelCase : Any = True
if len(__A ) != 0:
_lowerCamelCase : str = stack[len(__A ) - 1]
else:
_lowerCamelCase : Any = False
indirect_parents.append(__A )
_lowerCamelCase : Optional[int] = s
_lowerCamelCase : Any = ss
# check if we have reached the starting point
if len(__A ) == 0:
return False
def lowerCamelCase_ ( self : List[Any],__A : Any=-2,__A : Union[str, Any]=-1 ):
_lowerCamelCase : List[Any] = time()
self.dfs(__A,__A )
_lowerCamelCase : Any = time()
return end - begin
def lowerCamelCase_ ( self : Union[str, Any],__A : List[str]=-2 ):
_lowerCamelCase : List[Any] = time()
self.bfs(__A )
_lowerCamelCase : List[Any] = time()
return end - begin
class UpperCAmelCase__ :
def __init__( self : str ):
_lowerCamelCase : Optional[int] = {}
def lowerCamelCase_ ( self : Tuple,__A : Tuple,__A : int,__A : Tuple=1 ):
# check if the u exists
if self.graph.get(__A ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_lowerCamelCase : Union[str, Any] = [[w, v]]
# add the other way
if self.graph.get(__A ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
_lowerCamelCase : Tuple = [[w, u]]
def lowerCamelCase_ ( self : Any,__A : Union[str, Any],__A : Optional[int] ):
if self.graph.get(__A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__A )
# the other way round
if self.graph.get(__A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__A )
def lowerCamelCase_ ( self : str,__A : List[str]=-2,__A : List[Any]=-1 ):
if s == d:
return []
_lowerCamelCase : Any = []
_lowerCamelCase : List[Any] = []
if s == -2:
_lowerCamelCase : int = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_lowerCamelCase : str = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCamelCase : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowerCamelCase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__A ) != 0:
_lowerCamelCase : Optional[Any] = stack[len(__A ) - 1]
else:
_lowerCamelCase : Any = ss
# check if we have reached the starting point
if len(__A ) == 0:
return visited
def lowerCamelCase_ ( self : str,__A : List[str]=-1 ):
if c == -1:
_lowerCamelCase : Union[str, Any] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(__A ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowerCamelCase : Any = floor(random() * c ) + 1
if n != i:
self.add_pair(__A,__A,1 )
def lowerCamelCase_ ( self : Optional[int],__A : int=-2 ):
_lowerCamelCase : List[str] = deque()
_lowerCamelCase : Optional[Any] = []
if s == -2:
_lowerCamelCase : Optional[int] = list(self.graph )[0]
d.append(__A )
visited.append(__A )
while d:
_lowerCamelCase : Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCamelCase_ ( self : Optional[int],__A : Optional[Any] ):
return len(self.graph[u] )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_lowerCamelCase : Tuple = -2
_lowerCamelCase : Dict = []
_lowerCamelCase : Any = s
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : List[str] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCamelCase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCamelCase : Optional[int] = len(__A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCamelCase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCamelCase : int = True
if len(__A ) != 0:
_lowerCamelCase : int = stack[len(__A ) - 1]
else:
_lowerCamelCase : str = False
indirect_parents.append(__A )
_lowerCamelCase : Optional[int] = s
_lowerCamelCase : str = ss
# check if we have reached the starting point
if len(__A ) == 0:
return list(__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = []
_lowerCamelCase : List[str] = []
_lowerCamelCase : Tuple = list(self.graph )[0]
stack.append(__A )
visited.append(__A )
_lowerCamelCase : str = -2
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Any = s
_lowerCamelCase : int = False
_lowerCamelCase : Union[str, Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCamelCase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCamelCase : int = len(__A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCamelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCamelCase : int = True
if len(__A ) != 0:
_lowerCamelCase : List[str] = stack[len(__A ) - 1]
else:
_lowerCamelCase : Any = False
indirect_parents.append(__A )
_lowerCamelCase : Tuple = s
_lowerCamelCase : int = ss
# check if we have reached the starting point
if len(__A ) == 0:
return False
def lowerCamelCase_ ( self : Dict ):
return list(self.graph )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[Any]=-2,__A : Any=-1 ):
_lowerCamelCase : Dict = time()
self.dfs(__A,__A )
_lowerCamelCase : Tuple = time()
return end - begin
def lowerCamelCase_ ( self : Dict,__A : Union[str, Any]=-2 ):
_lowerCamelCase : Optional[int] = time()
self.bfs(__A )
_lowerCamelCase : Dict = time()
return end - begin
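# Usage sketch for the undirected graph class above, assuming the
# pre-obfuscation method names (add_pair, bfs, dfs) -- in this dump every
# method is emitted as lowerCamelCase_, so later definitions shadow earlier
# ones and the sketch is illustrative only:
#
#   g = Graph()
#   g.add_pair(0, 1)   # stores [w=1, 1] under key 0 and [w=1, 0] under key 1
#   g.add_pair(1, 2)
#   g.bfs(0)           # -> [0, 1, 2]
#   g.dfs(0, 2)        # -> [0, 1, 2], a path from 0 to 2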
| 44 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
        _lowerCamelCase : int = tokenizer(__A,padding=__A,truncation=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
        _lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncation=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class UpperCAmelCase__ :
def __init__( self : Optional[int] ):
_lowerCamelCase : List[Any] = [2, 1, 2, -1]
_lowerCamelCase : Any = [1, 2, 3, 4]
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = len(self.first_signal )
_lowerCamelCase : str = len(self.second_signal )
_lowerCamelCase : Tuple = max(__A,__A )
# create a zero matrix of max_length x max_length
_lowerCamelCase : Optional[int] = [[0] * max_length for i in range(__A )]
        # fill the smaller signal with zeros so that both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__A ):
_lowerCamelCase : str = deque(self.second_signal )
rotated_signal.rotate(__A )
for j, item in enumerate(__A ):
matrix[i][j] += item
# multiply the matrix with the first signal
_lowerCamelCase : str = np.matmul(np.transpose(__A ),np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__A,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
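# Cross-check (an added sketch, not part of the original module): circular
# convolution can also be computed with NumPy's FFT, which should match the
# matrix-based result above up to rounding:
#
#   import numpy as np
#   a = np.array([2, 1, 2, -1], dtype=float)
#   b = np.array([1, 2, 3, 4], dtype=float)
#   out = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
#   [round(v, 2) for v in out]  # -> [10.0, 10.0, 6.0, 14.0]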
| 44 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
def __getitem__( self : str,__A : List[str] ):
return {"x": self.x[i], "y": self.y[i]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
_lowerCamelCase : Optional[Any] = False
return x * self.a + self.b
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 1 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : float = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
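# Sanity check (added example): with mu = 0.0 and sigma = 1.0 the density peaks at
#   1 / sqrt(2 * pi) ≈ 0.3989422804014327
# which is the value this function returns for x = 0.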
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
                    # At the very least, converting "PCM-byte" to "WAV-byte" requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
                        # If we already have the PCM bytes, we don't have to read the file again (just use them!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
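                        # (dividing by 32767 rescales signed 16-bit PCM samples to floats in [-1.0, 1.0])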
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
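# Usage sketch (hedged; mirrors the public `datasets` API, with a hypothetical file path):
#   from datasets import Audio, Dataset
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}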
| 44 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"] ).straint(_lowerCAmelCase ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda _lowerCAmelCase : [int(_lowerCAmelCase ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 1 |
'''simple docstring'''
UpperCAmelCase_ : Any = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 44 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
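        # sample_size / sample_stride above are the window and hop lengths converted
        # from milliseconds to samples; n_freqs is the number of one-sided FFT bins
        # (n_fft // 2 + 1).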
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.floataa )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.floataa )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.floataa ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.intaa )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
| 44 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = LxmertConfig.from_json_file(_lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
_lowerCamelCase : str = LxmertForPreTraining(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
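# Example invocation (hypothetical script name and paths):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin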
| 44 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase_ : int = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old BERT code didn't have `decoder.bias`; reuse the prediction-head bias stored under cls.predictions.bias
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase_ : Any = '3'
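# NOTE: in the upstream environment-dump utility this value is assigned to
# os.environ["TF_CPP_MIN_LOG_LEVEL"] to suppress TensorFlow's C++ log output.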
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 44 |
'''simple docstring'''
import functools
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(_lowerCAmelCase ) == 0:
return 0
if min(_lowerCAmelCase ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(_lowerCAmelCase ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase )
@functools.cache
def dynamic_programming(_lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
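# Quick sanity check (added example, not from the original docstring): for
# days [1, 4, 6, 7, 8, 20] and costs [2, 7, 15], the cheapest plan is a 7-day
# pass on day 1 plus 1-day passes on days 8 and 20:
#   A_([1, 4, 6, 7, 8, 20], [2, 7, 15])  # -> 11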
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 1 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase__ :
@staticmethod
def lowerCamelCase_ ( *__A : Any,**__A : int ):
pass
def A_ ( _lowerCAmelCase : Image ):
"""simple docstring"""
_lowerCamelCase : int = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : List[str],__A : List[str],__A : List[Any],__A : Tuple ):
_lowerCamelCase : List[str] = DepthEstimationPipeline(model=__A,image_processor=__A )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : str,__A : Optional[Any],__A : int ):
_lowerCamelCase : List[Any] = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},__A )
import datasets
_lowerCamelCase : Optional[int] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils","image",split="test" )
_lowerCamelCase : Optional[int] = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
],__A,)
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@slow
@require_torch
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = "Intel/dpt-large"
_lowerCamelCase : Optional[int] = pipeline("depth-estimation",model=__A )
_lowerCamelCase : Dict = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
_lowerCamelCase : Any = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ),29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ),2.662 )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
        # It is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 44 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
_lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
                # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
    # fmt: off
    _lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
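    # choose the dataset-specific ignore index the image processor uses for "void" pixels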
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 1 |
'''simple docstring'''
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 44 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
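# memo[digitsum(b)][c] caches (diff, terms_skipped, k) jumps so long stretches of the
# digit-sum sequence can be skipped in one step instead of recomputed term by term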
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
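    # stop once enough terms have been skipped, or the accumulated jump would carry past 10**k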
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCamelCase_ ( self : Dict ):
super().setUp()
_lowerCamelCase : Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_lowerCamelCase : Union[str, Any] = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Dict = {"unk_token": "<unk>"}
_lowerCamelCase : Any = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : str,**__A : str ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Dict,**__A : Dict ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Any,__A : str ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase_ ( self : Optional[int] ):
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def lowerCamelCase_ ( self : Optional[int] ):
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_lowerCamelCase : List[str] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Any = tokenizer(__A,max_length=len(__A ),padding=__A,return_tensors="pt" )
self.assertIsInstance(__A,__A )
self.assertEqual((2, 9),batch.input_ids.shape )
self.assertEqual((2, 9),batch.attention_mask.shape )
_lowerCamelCase : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(__A,__A )
@require_torch
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Optional[int] = tokenizer(__A,padding=__A,return_tensors="pt" )
self.assertIn("input_ids",__A )
self.assertIn("attention_mask",__A )
self.assertNotIn("labels",__A )
self.assertNotIn("decoder_attention_mask",__A )
@require_torch
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Optional[int] = tokenizer(text_target=__A,max_length=3_2,padding="max_length",return_tensors="pt" )
self.assertEqual(3_2,targets["input_ids"].shape[1] )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Tuple = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"],padding=__A,truncation=__A,return_tensors="pt" )
self.assertIsInstance(__A,__A )
self.assertEqual(batch.input_ids.shape,(2, 5_1_2_2) )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = ["A long paragraph for summarization."]
_lowerCamelCase : Any = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : int = tokenizer(__A,return_tensors="pt" )
_lowerCamelCase : str = tokenizer(text_target=__A,return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = inputs["input_ids"]
_lowerCamelCase : List[Any] = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase_ ( self : Union[str, Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Union[str, Any] = ["Summary of the text.", "Another summary."]
_lowerCamelCase : Union[str, Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowerCamelCase : Optional[Any] = tokenizer(__A,padding=__A )
_lowerCamelCase : Optional[int] = [[0] * len(__A ) for x in encoded_output["input_ids"]]
_lowerCamelCase : Any = tokenizer.pad(__A )
self.assertSequenceEqual(outputs["global_attention_mask"],__A )
def lowerCamelCase_ ( self : Any ):
pass
def lowerCamelCase_ ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : Union[str, Any] = "A, <mask> AllenNLP sentence."
_lowerCamelCase : Dict = tokenizer_r.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A )
_lowerCamelCase : int = tokenizer_p.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A )
self.assertEqual(sum(tokens_r["token_type_ids"] ),sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ),sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ),)
_lowerCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_lowerCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 44 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 44 | 1 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any=1024 ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : int = [], []
_lowerCamelCase : List[str] = list(zip(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase , _lowerCamelCase : List[str] = sorted_examples[0]
def is_too_big(_lowerCAmelCase : str ):
return tok(_lowerCAmelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_lowerCamelCase : Any = new_src + " " + src
_lowerCamelCase : Union[str, Any] = new_tgt + " " + tgt
if is_too_big(_lowerCAmelCase ) or is_too_big(_lowerCAmelCase ): # cant fit, finalize example
finished_src.append(_lowerCAmelCase )
finished_tgt.append(_lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = src, tgt
else: # can fit, keep adding
_lowerCamelCase , _lowerCamelCase : Any = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_lowerCAmelCase )
finished_tgt.append(_lowerCAmelCase )
return finished_src, finished_tgt
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Path , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = Path(_lowerCAmelCase )
save_path.mkdir(exist_ok=_lowerCAmelCase )
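    # only the train split is packed; the val/test files are copied over unchanged below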
for split in ["train"]:
_lowerCamelCase , _lowerCamelCase : Any = data_dir / F'{split}.source', data_dir / F'{split}.target'
_lowerCamelCase : Tuple = [x.rstrip() for x in Path(_lowerCAmelCase ).open().readlines()]
_lowerCamelCase : Dict = [x.rstrip() for x in Path(_lowerCAmelCase ).open().readlines()]
_lowerCamelCase , _lowerCamelCase : List[str] = pack_examples(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
print(F'packed {split} split from {len(_lowerCAmelCase )} examples -> {len(_lowerCAmelCase )}.' )
Path(save_path / F'{split}.source' ).open("w" ).write("\n".join(_lowerCAmelCase ) )
Path(save_path / F'{split}.target' ).open("w" ).write("\n".join(_lowerCAmelCase ) )
for split in ["val", "test"]:
_lowerCamelCase , _lowerCamelCase : List[Any] = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(_lowerCAmelCase , save_path / F'{split}.source' )
shutil.copyfile(_lowerCAmelCase , save_path / F'{split}.target' )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=_lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=_lowerCAmelCase , default=128 )
parser.add_argument("--data_dir" , type=_lowerCAmelCase )
parser.add_argument("--save_path" , type=_lowerCAmelCase )
_lowerCamelCase : str = parser.parse_args()
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_lowerCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 44 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
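        # every patch-merging stage halves height and width, so the sequence length shrinks 4x per stage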
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
_lowerCamelCase : int = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
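    # greedy strategy: take items in descending order of the key function (e.g. value/weight) while they fit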
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
if args.model_type == "bert":
UpperCAmelCase_ : str = BertForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase_ : Optional[int] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
UpperCAmelCase_ : str = model.state_dict()
UpperCAmelCase_ : Union[str, Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
UpperCAmelCase_ : Tuple = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
UpperCAmelCase_ : str = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
UpperCAmelCase_ : Tuple = 0
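# initialize each of the student's six layers from one of the selected teacher layers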
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
UpperCAmelCase_ : str = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
UpperCAmelCase_ : int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
UpperCAmelCase_ : Tuple = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
UpperCAmelCase_ : int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
UpperCAmelCase_ : List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
UpperCAmelCase_ : List[str] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
UpperCAmelCase_ : Optional[int] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
UpperCAmelCase_ : Optional[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
UpperCAmelCase_ : str = state_dict['cls.predictions.decoder.weight']
UpperCAmelCase_ : List[str] = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase_ : Any = state_dict[f'''cls.predictions.transform.dense.{w}''']
UpperCAmelCase_ : List[Any] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 1 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = []
for line in lines:
_lowerCamelCase : List[str] = re.sub(r"#.*" , "" , _lowerCAmelCase ) # remove comments
if line:
filtered_lines.append(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "\n".join(_lowerCAmelCase )
# Make a hash from all this code
_lowerCamelCase : Tuple = full_str.encode("utf-8" )
return shaaaa(_lowerCAmelCase ).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase_ : int = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCAmelCase_ : Union[str, Any] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase_ : Dict = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
UpperCAmelCase_ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 44 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
F'\\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def csv_file_with_label( tmp_path ):
    """simple docstring"""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        "        label\n        good\n        bad\n        good\n        " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list( tmp_path ):
    """simple docstring"""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        "        int_list\n        1 2 3\n        4 5 6\n        7 8 9\n        " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv( csv_file , malformed_csv_file , caplog ):
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="Error tokenizing data" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image( csv_file_with_image ):
    """simple docstring"""
    with open(csv_file_with_image , encoding="utf-8" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("image" ).type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label( csv_file_with_label ):
    """simple docstring"""
    with open(csv_file_with_label , encoding="utf-8" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def test_csv_convert_int_list( csv_file_with_int_list ):
    """simple docstring"""
    csv = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x: [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
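# The `converters` mechanism exercised above is plain pandas; a standalone
# illustration without the datasets wrapper (assumption: pandas available,
# variable names hypothetical).
import io
import pandas as pd

_csv_text = "int_list\n1 2 3\n4 5 6\n7 8 9\n"
_df = pd.read_csv(io.StringIO(_csv_text), converters={"int_list": lambda x: [int(i) for i in x.split()]})
print(_df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]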
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
    def __init__( self , domain : str ):
        super().__init__()
        self.urls : list[str] = []
        self.domain = domain

    def handle_starttag( self , tag : str , attrs : list[tuple[str, str | None]] ):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )

def get_domain_name( url : str ):
    """simple docstring"""
    return ".".join(get_sub_domain_name(url ).split("." )[-2:] )

def get_sub_domain_name( url : str ):
    """simple docstring"""
    return parse.urlparse(url ).netloc

def emails_from_url( url : str = "https://github.com" ):
    """simple docstring"""
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails : set[str] = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(f'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
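# A minimal standalone check (not part of the original script) of the
# HTMLParser hook the Parser class above relies on: handle_starttag receives
# the tag name and its (name, value) attribute pairs.
from html.parser import HTMLParser

class LinkCollector(HTMLParser):
    def __init__(self):
        super().__init__()
        self.links = []

    def handle_starttag(self, tag, attrs):
        if tag == "a":
            self.links.extend(value for name, value in attrs if name == "href")

collector = LinkCollector()
collector.feed('<a href="https://example.com">x</a><a href="#">y</a>')
print(collector.links)  # ['https://example.com', '#']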
| 44 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
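# A standalone sketch (not part of the original test) of the device-dependent
# seeding used in get_dummy_inputs above; `_make_generator` is a hypothetical
# helper name.
import torch

def _make_generator(device, seed=0):
    # MPS has lacked per-device torch.Generator support, hence the
    # global-seed fallback used by these tests.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)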
| 44 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ['ConditionalDetrFeatureExtractor']
    _import_structure["image_processing_conditional_detr"] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
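# A simplified sketch (not the actual transformers._LazyModule implementation)
# of the lazy-import idea used above: attribute access triggers the deferred
# submodule import. Class and helper names here are hypothetical.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        return getattr(module, attr)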
| 44 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , seq_length=1_0_2_4 , num_of_sequences=1_0_2_4 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["content"] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def create_dataloader( args ):
    """simple docstring"""
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name , split="train" , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , train_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float("inf" )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
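# A tiny self-contained check (not from the original script) of the perplexity
# computation in evaluate(): perplexity is exp of the mean loss.
# torch.exp(torch.mean(torch.tensor([2.0, 2.2, 1.8])))  # tensor(7.3891), mean loss is 2.0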
| 44 | 1 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors( n : int ):
    """simple docstring"""
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n

def solution( limit : int = 10000 ):
    """simple docstring"""
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
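# Quick sanity check (not in the original file): 220 and 284 form the classic
# amicable pair, each equal to the sum of the other's proper divisors.
# assert sum_of_divisors(220) == 284
# assert sum_of_divisors(284) == 220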
| 44 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
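# A small standalone illustration (not from the original file) of the -1
# padding convention handled in _pad above: the mask is computed on the
# unpadded input, then padded positions are filled with -1 (0 already means
# "local attention", so it cannot double as a padding marker).
# global_attention_mask = [1, 0, 0, 0]       # computed before padding
# difference = 2                             # two padding tokens were added
# global_attention_mask + [-1] * difference  # -> [1, 0, 0, 0, -1, -1]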
| 44 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )

def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val

def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny" ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small" ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small" ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base" ):
            pass
        elif vit_name[4:].startswith("large" ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge" ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
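# A tiny illustration (not from the original script) of the fused-qkv split in
# read_in_q_k_v: timm stores q, k and v stacked along dim 0 of one matrix.
# hidden_size = 2
# in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
# query = in_proj_weight[:hidden_size, :]
# key = in_proj_weight[hidden_size : 2 * hidden_size, :]
# value = in_proj_weight[-hidden_size:, :]
# torch.equal(torch.cat([query, key, value]), in_proj_weight)  # True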
| 44 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main():
    """simple docstring"""
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/accelerate" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
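# The staleness test above is plain timedelta arithmetic (illustration only,
# not part of the original bot):
# from datetime import datetime, timedelta
# updated_at = datetime.utcnow() - timedelta(days=8)
# (datetime.utcnow() - updated_at).days  # 8 -> exceeds the 7-day threshold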
| 44 |
'''simple docstring'''
def is_pentagonal( n : int ):
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0

def solution( limit : int = 5000 ):
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
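# is_pentagonal inverts P(n) = n * (3n - 1) / 2 by solving the quadratic for n
# (illustrative check, not in the original file):
# assert is_pentagonal(22) and is_pentagonal(35)  # P_4 = 22, P_5 = 35
# assert not is_pentagonal(23)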
| 44 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase_ : List[Any] = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=_lowerCAmelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=_lowerCAmelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=_lowerCAmelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=_lowerCAmelCase , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=_lowerCAmelCase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=_lowerCAmelCase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=_lowerCAmelCase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
return args
def tokenize_function( tokenizer ):
    """simple docstring"""
    def fn( examples ):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples( tokenized_data ):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main( args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'Limiting the dataset to {args.limit} entries.' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts( examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F'dataset-{shard_count}-{records_containing}.tfrecord' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
        print(F'Total {args.split} records: {total_records}' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
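# Reading the shards back is the mirror image of get_serialized_examples
# (illustration only; the filename below is hypothetical):
# feature_description = {
#     "input_ids": tf.io.VarLenFeature(tf.int64),
#     "attention_mask": tf.io.VarLenFeature(tf.int64),
# }
# parsed = tf.data.TFRecordDataset("dataset-0-1000.tfrecord").map(
#     lambda record: tf.io.parse_single_example(record, feature_description)
# )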
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase__ ( unittest.TestCase ):
    def setUp( self ):
        self.block_size = 1_0

    def test_fit_to_block_sequence_too_small( self ):
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )

    def test_fit_to_block_sequence_fit_exactly( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )

    def test_fit_to_block_sequence_too_big( self ):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(sequence , self.block_size , 0 ) , expected_output )

    def test_process_story_no_highlights( self ):
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _ , summary_lines = process_story(raw_story )
        self.assertEqual(summary_lines , [] )

    def test_process_empty_story( self ):
        raw_story = ""
        story_lines , summary_lines = process_story(raw_story )
        self.assertEqual(story_lines , [] )
        self.assertEqual(summary_lines , [] )

    def test_process_story_with_highlights( self ):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines , summary_lines = process_story(raw_story )
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines , story_lines )
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines , summary_lines )

    def test_build_mask_no_padding( self ):
        sequence = torch.tensor([1, 2, 3, 4] )
        expected = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(sequence , 0 ).numpy() , expected.numpy() )

    def test_build_mask( self ):
        sequence = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 2_3 ).numpy() , expected.numpy() )

    def test_build_mask_with_padding_equal_to_one( self ):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(sequence , 1 ).numpy() , expected.numpy() )

    def test_compute_token_type_ids( self ):
        separator = 1_0_1
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        result = compute_token_type_ids(batch , separator )
        np.testing.assert_array_equal(result , expected )
| 44 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__( self , list_of_points : list[tuple[float, float]] ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1

    def basis_function( self , t : float ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values : list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values

    def bezier_curve_function( self , t : float ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve( self , step_size : float = 0.01 ):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x : list[float] = []  # x coordinates of points to plot
        to_plot_y : list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree ) , )
        plt.scatter(x_points , y_points , color="red" , label="Control Points" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
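# A quick numeric check (not in the original file): for the degree-1 curve
# through (1, 2) and (3, 5), t = 0.5 evaluates to the segment midpoint.
# BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5)  # (2.0, 3.5)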
| 44 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        processor = BlipaProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 44 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['transformers', 'torch', 'note_seq']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["transformers", "torch", "note_seq"] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["transformers", "torch", "note_seq"] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 44 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'bert'
def __init__( self : Dict,__A : str=3_0_5_2_2,__A : List[Any]=7_6_8,__A : Union[str, Any]=1_2,__A : Any=1_2,__A : str=3_0_7_2,__A : List[str]="gelu",__A : int=0.1,__A : Any=0.1,__A : int=5_1_2,__A : Union[str, Any]=2,__A : List[str]=0.02,__A : Tuple=1e-12,__A : Optional[Any]=0,__A : int="absolute",__A : str=True,__A : Any=None,**__A : str,):
super().__init__(pad_token_id=__A,**__A )
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Optional[int] = attention_probs_dropout_prob
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : str = classifier_dropout
class UpperCAmelCase__ ( A ):
@property
def lowerCamelCase_ ( self : List[Any] ):
if self.task == "multiple-choice":
_lowerCamelCase : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 44 |
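A hedged usage sketch for the row above (editor's addition; the obfuscated config class mirrors the standard transformers BertConfig, whose defaults match the signature shown):

from transformers import BertConfig

cfg = BertConfig(vocab_size=30522, num_hidden_layers=12)
assert cfg.hidden_size == 768 and cfg.num_attention_heads == 12  # signature defaults
print(cfg.to_json_string()[:80])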
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44 | 1 |
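A hedged sketch of the truncate_before_pattern behavior exercised in the slow test above (editor's addition; it assumes the public Salesforce/codegen-350M-mono checkpoint is reachable):

from transformers import CodeGenTokenizer

tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
# Decoding stops before the first match of any pattern (a line-initial comment
# marker or a run of three newlines), dropping the trailing "\n\n\n\n#".
print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))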
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
UpperCAmelCase_ : List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
UpperCAmelCase_ : Any = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
UpperCAmelCase_ : Tuple = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : bool , _lowerCAmelCase : Optional[Dict[int, int]] = None , _lowerCAmelCase : bool = False , ):
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
_lowerCamelCase : Any = new_id
# turn into Numpy arrays
_lowerCamelCase : str = np.array(_lowerCAmelCase )
_lowerCamelCase : List[str] = np.array(_lowerCAmelCase )
if reduce_labels:
_lowerCamelCase : Optional[int] = 255
_lowerCamelCase : Tuple = label - 1
_lowerCamelCase : List[Any] = 255
_lowerCamelCase : int = label != ignore_index
_lowerCamelCase : str = np.not_equal(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : List[str] = pred_label[mask]
_lowerCamelCase : str = np.array(_lowerCAmelCase )[mask]
_lowerCamelCase : Union[str, Any] = pred_label[pred_label == label]
_lowerCamelCase : int = np.histogram(_lowerCAmelCase , bins=_lowerCAmelCase , range=(0, num_labels - 1) )[0]
_lowerCamelCase : Dict = np.histogram(_lowerCAmelCase , bins=_lowerCAmelCase , range=(0, num_labels - 1) )[0]
_lowerCamelCase : Tuple = np.histogram(_lowerCAmelCase , bins=_lowerCAmelCase , range=(0, num_labels - 1) )[0]
_lowerCamelCase : List[str] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : bool , _lowerCAmelCase : Optional[Dict[int, int]] = None , _lowerCAmelCase : bool = False , ):
"""simple docstring"""
_lowerCamelCase : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCamelCase : Union[str, Any] = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCamelCase : Optional[int] = np.zeros((num_labels,) , dtype=np.floataa )
_lowerCamelCase : List[str] = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = intersect_and_union(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : bool , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[Dict[int, int]] = None , _lowerCAmelCase : bool = False , ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = total_intersect_and_union(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# compute metrics
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
_lowerCamelCase : Optional[Any] = total_area_intersect / total_area_union
_lowerCamelCase : str = total_area_intersect / total_area_label
_lowerCamelCase : Union[str, Any] = np.nanmean(_lowerCAmelCase )
_lowerCamelCase : Dict = np.nanmean(_lowerCAmelCase )
_lowerCamelCase : Any = all_acc
_lowerCamelCase : Tuple = iou
_lowerCamelCase : str = acc
if nan_to_num is not None:
_lowerCamelCase : str = {metric: np.nan_to_num(_lowerCAmelCase , nan=_lowerCAmelCase ) for metric, metric_value in metrics.items()}
return metrics
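# --- Editor's sketch (an assumption, not part of the original metric file): the IoU
# formula the helpers above implement, restated with fresh hypothetical names on a
# tiny case so the arithmetic can be checked by hand. ---
def _toy_iou(pred, gt, num_labels):
    """Per-class IoU = intersection / union of the boolean masks for each class."""
    ious = np.zeros(num_labels, dtype=np.float64)
    for c in range(num_labels):
        inter = np.logical_and(pred == c, gt == c).sum()
        union = np.logical_or(pred == c, gt == c).sum()
        ious[c] = inter / union if union else np.nan
    return ious

# _toy_iou(np.array([[0, 1], [1, 1]]), np.array([[0, 1], [0, 1]]), 2)
# -> array([0.5, 0.66666667]); mean IoU = 7/12, roughly 0.583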
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ),reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
],)
def lowerCamelCase_ ( self : List[Any],__A : Dict,__A : Dict,__A : int,__A : bool,__A : Optional[int] = None,__A : Optional[Dict[int, int]] = None,__A : bool = False,):
_lowerCamelCase : Optional[int] = mean_iou(
results=__A,gt_seg_maps=__A,num_labels=__A,ignore_index=__A,nan_to_num=__A,label_map=__A,reduce_labels=__A,)
return iou_result
| 44 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=2,__A : Any=3,__A : Optional[int]=6_4,__A : Tuple=None ):
_lowerCamelCase : int = np.random.default_rng(__A )
_lowerCamelCase : List[str] = length
_lowerCamelCase : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1,size=(length,) ).astype(np.floataa )
def __len__( self : Dict ):
return self.length
    def __getitem__( self : str,__A : List[str] ):
        return {"x": self.x[__A], "y": self.y[__A]}
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : Optional[Any]=0,__A : Optional[int]=0,__A : Dict=False ):
super().__init__()
_lowerCamelCase : Tuple = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Optional[int] = True
    def lowerCamelCase_ ( self : List[str],__A : Tuple=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {__A.dtype}' )
            self.first_batch = False
        return __A * self.a[0] + self.b[0]
class UpperCAmelCase__ ( torch.nn.Module ):
def __init__( self : Union[str, Any],__A : List[str]=0,__A : List[str]=0,__A : int=False ):
super().__init__()
_lowerCamelCase : Optional[int] = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(__A ).float() )
_lowerCamelCase : Tuple = True
    def lowerCamelCase_ ( self : str,__A : List[Any]=None ):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {__A.dtype}' )
            self.first_batch = False
        return __A * self.a + self.b
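# --- Editor's sketch (an assumption; the module names above are obfuscated duplicates,
# so the intended toy regression is restated here with fresh hypothetical names). ---
def _fit_toy_regression(seed=0, steps=200, lr=0.1):
    """Recover (a, b) = (2, 3) from y = 2x + 3 + noise with plain SGD."""
    rng = np.random.default_rng(seed)
    x = torch.from_numpy(rng.normal(size=(64,)).astype(np.float32))
    y = 2 * x + 3 + torch.from_numpy(rng.normal(scale=0.1, size=(64,)).astype(np.float32))
    a = torch.zeros(1, requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    opt = torch.optim.SGD([a, b], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        loss = ((a * x + b - y) ** 2).mean()
        loss.backward()
        opt.step()
    return a.item(), b.item()  # lands near (2.0, 3.0)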
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
_lowerCamelCase : List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase )
_lowerCamelCase : Dict = datasets["train"].unique("label" )
_lowerCamelCase : Optional[Any] = {v: i for i, v in enumerate(_lowerCAmelCase )}
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Optional[int] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : Optional[Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(_lowerCAmelCase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=2 )
_lowerCamelCase : Optional[int] = DataLoader(tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 44 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Dict = False
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : str = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
UpperCAmelCase_ : List[Any] = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
UpperCAmelCase_ : Optional[int] = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
UpperCAmelCase_ : List[str] = reader.read()
UpperCAmelCase_ : List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
UpperCAmelCase_ : Dict = UNetaDModel(**config)
else:
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
UpperCAmelCase_ : str = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
UpperCAmelCase_ : Optional[int] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
UpperCAmelCase_ : Optional[int] = config[key]
del config[key]
UpperCAmelCase_ : List[str] = [k.replace('UNetRes', '') for k in config['down_block_types']]
UpperCAmelCase_ : Union[str, Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
UpperCAmelCase_ : List[str] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
UpperCAmelCase_ : Optional[Any] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
UpperCAmelCase_ : Optional[int] = param_value
UpperCAmelCase_ : List[Any] = True
if not has_changed:
UpperCAmelCase_ : Tuple = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 44 |
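A hedged sketch of the state-dict renaming pass the script above attempts (editor's addition; _rename_param is a hypothetical helper, and the mapping is copied from the dict in the script):

key_map = {"time_steps": "time_proj", "mid": "mid_block",
           "downsample_blocks": "down_blocks", "upsample_blocks": "up_blocks"}

def _rename_param(key: str) -> str:
    """Swap a legacy top-level module name for its diffusers equivalent, if any."""
    head, _, rest = key.partition(".")
    return key_map.get(head, head) + ("." + rest if rest else "")

# _rename_param("mid.resnets.0.conv1.weight") -> "mid_block.resnets.0.conv1.weight"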
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = None
# Automatically constructed
lowerCAmelCase_ = "dict"
lowerCAmelCase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase_ = field(default='Audio' , init=A , repr=A )
def __call__( self : Tuple ):
return self.pa_type
def lowerCamelCase_ ( self : Any,__A : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__A,__A ):
return {"bytes": None, "path": value}
elif isinstance(__A,__A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : List[Any] = BytesIO()
sf.write(__A,value["array"],value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
 # To convert raw "PCM bytes" to "WAV bytes", the sampling rate must be known
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
 # If we already have the PCM bytes, we don't need to read the file again (just use them!)
_lowerCamelCase : Dict = np.frombuffer(value["bytes"],dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
_lowerCamelCase : str = np.memmap(value["path"],dtype="h",mode="r" ).astype(np.floataa ) / 3_2_7_6_7
_lowerCamelCase : Optional[int] = BytesIO(bytes() )
sf.write(__A,__A,value["sampling_rate"],format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCamelCase_ ( self : Optional[Any],__A : dict,__A : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_lowerCamelCase : Tuple = xsplitext(__A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_lowerCamelCase : Tuple = token_per_repo_id or {}
_lowerCamelCase : Union[str, Any] = path.split("::" )[-1]
try:
_lowerCamelCase : str = string_to_dict(__A,config.HUB_DATASETS_URL )["repo_id"]
_lowerCamelCase : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : Any = None
with xopen(__A,"rb",use_auth_token=__A ) as f:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__A )
else:
_lowerCamelCase , _lowerCamelCase : str = sf.read(__A )
_lowerCamelCase : List[str] = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(__A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(__A,orig_sr=__A,target_sr=self.sampling_rate )
_lowerCamelCase : Optional[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCamelCase_ ( self : Any ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCamelCase_ ( self : List[str],__A : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, storage],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : Dict = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Any = pa.StructArray.from_arrays([storage, path_array],["bytes", "path"],mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(__A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_lowerCamelCase : Tuple = storage.field("bytes" )
else:
_lowerCamelCase : Any = pa.array([None] * len(__A ),type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_lowerCamelCase : List[str] = storage.field("path" )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(__A ),type=pa.string() )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=storage.is_null() )
return array_cast(__A,self.pa_type )
def lowerCamelCase_ ( self : str,__A : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__A : Dict ):
with xopen(__A,"rb" ) as f:
_lowerCamelCase : Any = f.read()
return bytes_
_lowerCamelCase : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],type=pa.binary(),)
_lowerCamelCase : str = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("path" ).to_pylist()],type=pa.string(),)
_lowerCamelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array],["bytes", "path"],mask=bytes_array.is_null() )
return array_cast(__A,self.pa_type )
| 44 | 1 |
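A hedged sketch of the raw-PCM branch in the Audio feature above (editor's addition; _decode_pcm16 is a hypothetical name): headerless 16-bit PCM is decoded by reinterpreting the byte buffer and rescaling by the int16 maximum, which is what the snippet's np.frombuffer(...).astype(...) / 32767 does.

import numpy as np

def _decode_pcm16(raw: bytes) -> np.ndarray:
    """Interpret headerless 16-bit PCM bytes as a float32 waveform in [-1, 1]."""
    return np.frombuffer(raw, dtype=np.int16).astype(np.float32) / 32767

# _decode_pcm16(np.array([0, 16384, -32768], dtype=np.int16).tobytes())
# -> array([ 0.       ,  0.5000153, -1.0000305], dtype=float32)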
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : int=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Any=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCamelCase : int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowerCamelCase : str = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowerCamelCase : Union[str, Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_lowerCAmelCase )
if decoder_head_mask is None:
_lowerCamelCase : Dict = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_lowerCAmelCase )
if cross_attn_head_mask is None:
_lowerCamelCase : Dict = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_lowerCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Dict,__A : Tuple=1_3,__A : Optional[Any]=7,__A : Optional[Any]=True,__A : Tuple=False,__A : Optional[int]=9_9,__A : List[Any]=1_6,__A : str=2,__A : Optional[Any]=4,__A : List[Any]=4,__A : Optional[int]="relu",__A : Optional[Any]=0.1,__A : Any=0.1,__A : Optional[Any]=0.0,__A : List[Any]=0.0,__A : List[Any]=2_0,__A : Dict=2,__A : Any=1,__A : Tuple=0,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : int = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : Any = encoder_layerdrop
_lowerCamelCase : Union[str, Any] = decoder_layerdrop
_lowerCamelCase : str = max_position_embeddings
_lowerCamelCase : int = eos_token_id
_lowerCamelCase : Optional[Any] = pad_token_id
_lowerCamelCase : List[Any] = bos_token_id
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : List[str] = self.eos_token_id # Eos Token
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
 # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowerCamelCase : Tuple = input_ids.clamp(self.pad_token_id + 1 )
_lowerCamelCase : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowerCamelCase : Any = self.get_config()
_lowerCamelCase : Tuple = prepare_mam_aaa_inputs_dict(__A,__A,__A )
return config, inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
return MaMaaaConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,encoder_layerdrop=self.encoder_layerdrop,decoder_layerdrop=self.decoder_layerdrop,max_position_embeddings=self.max_position_embeddings,eos_token_id=self.eos_token_id,bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,)
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : str = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : int ):
_lowerCamelCase : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
_lowerCamelCase : int = inputs_dict["input_ids"]
_lowerCamelCase : Union[str, Any] = inputs_dict["attention_mask"]
_lowerCamelCase : List[str] = inputs_dict["head_mask"]
# first forward pass
_lowerCamelCase : List[Any] = model(__A,attention_mask=__A,head_mask=__A,use_cache=__A )
_lowerCamelCase , _lowerCamelCase : int = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_lowerCamelCase : int = ids_tensor((self.batch_size, 3),config.vocab_size )
_lowerCamelCase : Union[str, Any] = ids_tensor((self.batch_size, 3),2 )
# append to next input_ids and
_lowerCamelCase : str = torch.cat([input_ids, next_tokens],dim=-1 )
_lowerCamelCase : List[Any] = torch.cat([attention_mask, next_attn_mask],dim=-1 )
_lowerCamelCase : Tuple = model(__A,attention_mask=__A )["last_hidden_state"]
_lowerCamelCase : Any = model(__A,attention_mask=__A,past_key_values=__A )[
"last_hidden_state"
]
# select random slice
_lowerCamelCase : str = ids_tensor((1,),output_from_past.shape[-1] ).item()
_lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A,__A,atol=1e-2 ) )
def lowerCamelCase_ ( self : Any,__A : Union[str, Any],__A : Dict ):
_lowerCamelCase : Tuple = MaMaaaModel(config=__A ).to(__A ).eval()
_lowerCamelCase : List[str] = model(**__A )
_lowerCamelCase : Dict = outputs.encoder_last_hidden_state
_lowerCamelCase : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[int] = model.get_encoder()
encoder.save_pretrained(__A )
_lowerCamelCase : Dict = MaMaaaEncoder.from_pretrained(__A ).to(__A )
_lowerCamelCase : Any = encoder(inputs_dict["input_ids"],attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Tuple = model.get_decoder()
decoder.save_pretrained(__A )
_lowerCamelCase : List[str] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
_lowerCamelCase : Tuple = decoder(
input_ids=inputs_dict["decoder_input_ids"],attention_mask=inputs_dict["decoder_attention_mask"],encoder_hidden_states=__A,encoder_attention_mask=inputs_dict["attention_mask"],)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCAmelCase__ ( A , A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : str,__A : Any,__A : Dict,__A : Union[str, Any],__A : str,__A : str ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : str = MaMaaaModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self,config_class=__A )
def lowerCamelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_class.from_pretrained(__A,output_loading_info=__A )
self.assertEqual(info["missing_keys"],[] )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
_lowerCamelCase : int = model_class(__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = copy.deepcopy(self._prepare_for_class(__A,__A ) )
if not self.is_encoder_decoder:
_lowerCamelCase : List[str] = inputs["input_ids"]
del inputs["input_ids"]
else:
_lowerCamelCase : Tuple = inputs["input_ids"]
_lowerCamelCase : Union[str, Any] = inputs.get("decoder_input_ids",__A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids",__A )
_lowerCamelCase : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
_lowerCamelCase : List[Any] = wte(__A )
else:
_lowerCamelCase : List[str] = wte(__A )
_lowerCamelCase : Dict = wte(__A )
with torch.no_grad():
model(**__A )[0]
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Union[str, Any] = input_dict["input_ids"]
_lowerCamelCase : Union[str, Any] = input_ids.ne(1 ).to(__A )
_lowerCamelCase : Any = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A,attention_mask=__A )
model.generate(num_beams=4,do_sample=__A,early_stopping=__A,num_return_sequences=3 )
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return torch.tensor(_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase )
UpperCAmelCase_ : Optional[Any] = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Optional[int] ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
_lowerCamelCase : Optional[int] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
_lowerCamelCase : List[Any] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
_lowerCamelCase : List[Any] = prepare_mam_aaa_inputs_dict(model.config,__A,__A )
with torch.no_grad():
_lowerCamelCase : Any = model(**__A )[0]
_lowerCamelCase : List[Any] = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape,__A )
# change to expected output here
_lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]],device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3],__A,atol=__A ) )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : int = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
_lowerCamelCase : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
_lowerCamelCase : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
_lowerCamelCase : List[Any] = prepare_mam_aaa_inputs_dict(model.config,__A,__A )
with torch.no_grad():
_lowerCamelCase : Any = model(**__A )[0]
_lowerCamelCase : str = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape,__A )
# change to expected output here
_lowerCamelCase : str = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]],device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3],__A,atol=__A ) )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[str] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
_lowerCamelCase : Any = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M",src_lang="fr",tgt_lang="en" )
_lowerCamelCase : Union[str, Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
_lowerCamelCase : str = tokenizer(__A,padding=__A,return_tensors="pt" )
_lowerCamelCase : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ),attention_mask=dct["attention_mask"].to(__A ),num_beams=5,forced_bos_token_id=tokenizer.get_lang_id("en" ),)
_lowerCamelCase : str = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
_lowerCamelCase : str = tokenizer.batch_decode(
hypotheses_batch.tolist(),clean_up_tokenization_spaces=__A,skip_special_tokens=__A )
assert generated == expected_en
| 44 |
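A hedged usage sketch distilled from the integration test above (editor's addition; it assumes the public facebook/m2m100_418M checkpoint and the standard transformers API that the obfuscated MaMaaa* names stand in for):

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tok(["L'affaire NSA souligne l'absence totale de débat sur le renseignement"],
            return_tensors="pt", padding=True)
out = model.generate(**batch, num_beams=5, forced_bos_token_id=tok.get_lang_id("en"))
print(tok.batch_decode(out, skip_special_tokens=True))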
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
| 44 | 1 |
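A hedged usage sketch for the config above (editor's addition; it assumes the standard transformers GLPNConfig, whose defaults match the obfuscated signature shown):

from transformers import GLPNConfig

cfg = GLPNConfig()  # defaults: 4 encoder blocks, depths [2, 2, 2, 2]
print(cfg.hidden_sizes, cfg.decoder_hidden_size)  # [32, 64, 160, 256] 64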
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
    exp_x = torch.exp(_lowerCAmelCase )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(_lowerCAmelCase * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
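# --- Editor's sketch (an assumption, not part of the original DeeBERT file): the helper
# above computes H = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i)),
# i.e. the Shannon entropy of softmax(x); `A_` is its obfuscated name. ---
# A_(torch.tensor([[1.0, 1.0, 1.0], [10.0, 0.0, 0.0]]))
# -> roughly tensor([1.0986e+00, 1.0e-03]): uniform logits give log(3), peaked logits ~0.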
class UpperCAmelCase__ ( nn.Module ):
def __init__( self : Tuple,__A : List[Any] ):
super().__init__()
_lowerCamelCase : Optional[int] = config.output_attentions
_lowerCamelCase : Any = config.output_hidden_states
_lowerCamelCase : str = nn.ModuleList([BertLayer(__A ) for _ in range(config.num_hidden_layers )] )
_lowerCamelCase : str = nn.ModuleList([BertHighway(__A ) for _ in range(config.num_hidden_layers )] )
_lowerCamelCase : List[str] = [-1 for _ in range(config.num_hidden_layers )]
def lowerCamelCase_ ( self : List[Any],__A : int ):
if (type(__A ) is float) or (type(__A ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCamelCase : List[Any] = x
else:
_lowerCamelCase : List[str] = x
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, Any] ):
_lowerCamelCase : List[str] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowerCamelCase_ ( self : List[str],__A : int,__A : Any=None,__A : Any=None,__A : int=None,__A : Optional[int]=None,):
_lowerCamelCase : Tuple = ()
_lowerCamelCase : Tuple = ()
_lowerCamelCase : Optional[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCamelCase : int = all_hidden_states + (hidden_states,)
_lowerCamelCase : List[Any] = layer_module(
__A,__A,head_mask[i],__A,__A )
_lowerCamelCase : Dict = layer_outputs[0]
if self.output_attentions:
_lowerCamelCase : str = all_attentions + (layer_outputs[1],)
_lowerCamelCase : List[str] = (hidden_states,)
if self.output_hidden_states:
_lowerCamelCase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCamelCase : Dict = current_outputs + (all_attentions,)
_lowerCamelCase : Any = self.highway[i](__A )
# logits, pooled_output
if not self.training:
_lowerCamelCase : int = highway_exit[0]
_lowerCamelCase : Optional[Any] = entropy(__A )
_lowerCamelCase : Tuple = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCamelCase : List[str] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCamelCase : int = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__A,i + 1 )
else:
_lowerCamelCase : int = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCamelCase : Optional[Any] = all_hidden_states + (hidden_states,)
_lowerCamelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCamelCase : int = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCamelCase : Optional[int] = outputs + (all_attentions,)
_lowerCamelCase : Tuple = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'The Bert Model transformer with early exiting (DeeBERT). ' , A , )
class UpperCAmelCase__ ( A ):
def __init__( self : Any,__A : int ):
super().__init__(__A )
_lowerCamelCase : str = config
_lowerCamelCase : str = BertEmbeddings(__A )
_lowerCamelCase : Tuple = DeeBertEncoder(__A )
_lowerCamelCase : List[str] = BertPooler(__A )
self.init_weights()
def lowerCamelCase_ ( self : Dict ):
self.encoder.init_highway_pooler(self.pooler )
def lowerCamelCase_ ( self : Tuple ):
return self.embeddings.word_embeddings
def lowerCamelCase_ ( self : Any,__A : Optional[int] ):
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : Tuple,__A : int ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__A )
@add_start_docstrings_to_model_forward(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[str]=None,__A : int=None,__A : Union[str, Any]=None,__A : Tuple=None,__A : Dict=None,__A : int=None,__A : Dict=None,__A : List[str]=None,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
_lowerCamelCase : List[str] = input_ids.size()
elif inputs_embeds is not None:
_lowerCamelCase : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
_lowerCamelCase : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCamelCase : List[str] = torch.ones(__A,device=__A )
if encoder_attention_mask is None:
_lowerCamelCase : List[Any] = torch.ones(__A,device=__A )
if token_type_ids is None:
_lowerCamelCase : List[str] = torch.zeros(__A,dtype=torch.long,device=__A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCamelCase : torch.Tensor = self.get_extended_attention_mask(__A,__A,__A )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCamelCase : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCamelCase : Optional[int] = encoder_attention_mask[:, None, None, :]
_lowerCamelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCamelCase : Any = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCamelCase : Union[str, Any] = self.get_head_mask(__A,self.config.num_hidden_layers )
_lowerCamelCase : List[Any] = self.embeddings(
input_ids=__A,position_ids=__A,token_type_ids=__A,inputs_embeds=__A )
_lowerCamelCase : Union[str, Any] = self.encoder(
__A,attention_mask=__A,head_mask=__A,encoder_hidden_states=__A,encoder_attention_mask=__A,)
_lowerCamelCase : Union[str, Any] = encoder_outputs[0]
_lowerCamelCase : Tuple = self.pooler(__A )
_lowerCamelCase : Optional[int] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
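# --- Illustrative sketch (added; not part of the original file) ---
# The attention-mask handling in the forward pass above relies on the standard
# additive-mask trick: padding positions are mapped to a large negative value
# so that softmax assigns them ~0 probability. A minimal standalone version:
import torch
def _additive_attention_mask_sketch(mask: torch.Tensor) -> torch.Tensor:
    # mask: (batch, seq_len) with 1 = real token, 0 = padding
    ext = mask[:, None, None, :].to(torch.float32)  # broadcast over heads / query positions
    return (1.0 - ext) * -10000.0  # 0.0 for tokens, -10000.0 for padding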
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : int,__A : int ):
_lowerCamelCase : Tuple = message
_lowerCamelCase : Union[str, Any] = exit_layer # start from 1!
class UpperCAmelCase__ ( nn.Module ):
def __init__( self : int,__A : List[Any] ):
super().__init__()
_lowerCamelCase : List[Any] = BertPooler(__A )
_lowerCamelCase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowerCamelCase : Tuple = nn.Linear(config.hidden_size,config.num_labels )
def lowerCamelCase_ ( self : Tuple,__A : Union[str, Any] ):
# Pooler
_lowerCamelCase : int = encoder_outputs[0]
_lowerCamelCase : Tuple = self.pooler(__A )
# "return" pooler_output
# BertModel
_lowerCamelCase : Any = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCamelCase : List[Any] = bmodel_output[1]
_lowerCamelCase : Optional[int] = self.dropout(__A )
_lowerCamelCase : Optional[Any] = self.classifier(__A )
return logits, pooled_output
@add_start_docstrings(
'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ' , A , )
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : Optional[int] ):
super().__init__(__A )
_lowerCamelCase : Tuple = config.num_labels
_lowerCamelCase : Dict = config.num_hidden_layers
_lowerCamelCase : Union[str, Any] = DeeBertModel(__A )
_lowerCamelCase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowerCamelCase : int = nn.Linear(config.hidden_size,self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__A )
def lowerCamelCase_ ( self : Dict,__A : Any=None,__A : Optional[Any]=None,__A : Union[str, Any]=None,__A : Union[str, Any]=None,__A : Optional[int]=None,__A : Optional[int]=None,__A : Union[str, Any]=None,__A : Optional[int]=-1,__A : Any=False,):
_lowerCamelCase : Optional[Any] = self.num_layers
try:
_lowerCamelCase : Optional[Any] = self.bert(
__A,attention_mask=__A,token_type_ids=__A,position_ids=__A,head_mask=__A,inputs_embeds=__A,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCamelCase : Union[str, Any] = outputs[1]
_lowerCamelCase : List[Any] = self.dropout(__A )
_lowerCamelCase : str = self.classifier(__A )
_lowerCamelCase : int = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCamelCase : Tuple = e.message
_lowerCamelCase : Optional[int] = e.exit_layer
_lowerCamelCase : Optional[Any] = outputs[0]
if not self.training:
_lowerCamelCase : List[Any] = entropy(__A )
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCamelCase : List[Any] = MSELoss()
_lowerCamelCase : Tuple = loss_fct(logits.view(-1 ),labels.view(-1 ) )
else:
_lowerCamelCase : Optional[int] = CrossEntropyLoss()
_lowerCamelCase : Optional[int] = loss_fct(logits.view(-1,self.num_labels ),labels.view(-1 ) )
# work with highway exits
_lowerCamelCase : Union[str, Any] = []
for highway_exit in outputs[-1]:
_lowerCamelCase : Any = highway_exit[0]
if not self.training:
highway_logits_all.append(__A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCamelCase : List[str] = MSELoss()
_lowerCamelCase : str = loss_fct(highway_logits.view(-1 ),labels.view(-1 ) )
else:
_lowerCamelCase : Optional[int] = CrossEntropyLoss()
_lowerCamelCase : str = loss_fct(highway_logits.view(-1,self.num_labels ),labels.view(-1 ) )
highway_losses.append(__A )
if train_highway:
_lowerCamelCase : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCamelCase : Union[str, Any] = (loss,) + outputs
if not self.training:
_lowerCamelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCamelCase : str = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
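# --- Illustrative sketch (added; not part of the original file) ---
# The `entropy(...)` helper used above is assumed to compute the Shannon
# entropy of the softmax distribution over the logits; the early-exit policy
# then compares this value against a confidence threshold:
import torch
def _entropy_sketch(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)  # higher = less confident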
| 44 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['input_features', 'attention_mask']
def __init__( self : Any,__A : List[Any]=8_0,__A : Dict=1_6_0_0_0,__A : Tuple=0.0,__A : Dict=1_0,__A : int=2_5,__A : Union[str, Any]="hamming_window",__A : List[str]=32768.0,__A : Union[str, Any]=0.97,__A : str=1.0,__A : Union[str, Any]=True,__A : Tuple=True,__A : Optional[Any]=False,**__A : Optional[Any],):
super().__init__(feature_size=__A,sampling_rate=__A,padding_value=__A,**__A )
_lowerCamelCase : Dict = feature_size
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Any = padding_value
_lowerCamelCase : Dict = hop_length
_lowerCamelCase : Tuple = win_length
_lowerCamelCase : str = frame_signal_scale
_lowerCamelCase : List[str] = preemphasis_coeff
_lowerCamelCase : List[str] = mel_floor
_lowerCamelCase : str = normalize_means
_lowerCamelCase : Any = normalize_vars
_lowerCamelCase : List[str] = win_function
_lowerCamelCase : Tuple = return_attention_mask
_lowerCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0
_lowerCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0
_lowerCamelCase : Any = optimal_fft_length(self.sample_size )
_lowerCamelCase : Dict = (self.n_fft // 2) + 1
def lowerCamelCase_ ( self : Any,__A : np.array ):
if self.win_function == "hamming_window":
_lowerCamelCase : Any = window_function(window_length=self.sample_size,name=self.win_function,periodic=__A )
else:
_lowerCamelCase : Optional[int] = window_function(window_length=self.sample_size,name=self.win_function )
_lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs,num_mel_filters=self.feature_size,min_frequency=0.0,max_frequency=self.sampling_rate / 2.0,sampling_rate=self.sampling_rate,)
_lowerCamelCase : List[str] = spectrogram(
one_waveform * self.frame_signal_scale,window=__A,frame_length=self.sample_size,hop_length=self.sample_stride,fft_length=self.n_fft,center=__A,preemphasis=self.preemphasis_coeff,mel_filters=__A,mel_floor=self.mel_floor,log_mel="log",)
return msfc_features.T
def lowerCamelCase_ ( self : Optional[int],__A : List[str],__A : Dict,__A : int ):
# make sure we normalize float32 arrays
if self.normalize_means:
_lowerCamelCase : Optional[Any] = x[:input_length].mean(axis=0 )
_lowerCamelCase : Optional[int] = np.subtract(__A,__A )
if self.normalize_vars:
_lowerCamelCase : int = x[:input_length].std(axis=0 )
_lowerCamelCase : Any = np.divide(__A,__A )
if input_length < x.shape[0]:
_lowerCamelCase : Tuple = padding_value
# make sure array is in float32
_lowerCamelCase : Optional[int] = x.astype(np.float32 )
return x
def lowerCamelCase_ ( self : Any,__A : List[np.ndarray],__A : Optional[np.ndarray] = None ):
_lowerCamelCase : Optional[int] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A,__A,self.padding_value ) for x, n in zip(__A,__A )]
def __call__( self : Optional[Any],__A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],__A : Union[bool, str, PaddingStrategy] = False,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[bool] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[int] = None,**__A : Optional[Any],):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_lowerCamelCase : List[str] = isinstance(__A,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__A,(list, tuple) ) and (isinstance(raw_speech[0],(np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : List[Any] = [np.asarray(__A,dtype=np.float32 ) for speech in raw_speech]
elif not is_batched and not isinstance(__A,np.ndarray ):
_lowerCamelCase : Dict = np.asarray(__A,dtype=np.float32 )
elif isinstance(__A,np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
_lowerCamelCase : Tuple = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [raw_speech]
# extract fbank features
_lowerCamelCase : str = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCamelCase : Union[str, Any] = BatchFeature({"input_features": features} )
_lowerCamelCase : List[Any] = self.pad(
__A,padding=__A,max_length=__A,truncation=__A,pad_to_multiple_of=__A,return_attention_mask=__A,**__A,)
# make sure list is in array format
_lowerCamelCase : Optional[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0],__A ):
_lowerCamelCase : int = [np.asarray(__A,dtype=np.float32 ) for feature in input_features]
_lowerCamelCase : Dict = padded_inputs.get("attention_mask" )
if attention_mask is not None:
_lowerCamelCase : Dict = [np.asarray(__A,dtype=np.int32 ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCamelCase : Dict = (
np.array(__A,dtype=np.int32 )
if self._get_padding_strategies(__A,max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCamelCase : Tuple = self.normalize(
padded_inputs["input_features"],attention_mask=__A )
if return_tensors is not None:
_lowerCamelCase : Dict = padded_inputs.convert_to_tensors(__A )
return padded_inputs
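# --- Illustrative sketch (added; not part of the original file) ---
# The normalization above is per-utterance mean/variance normalization (CMVN).
# A rough standalone equivalent for a single (frames, dim) feature array
# (the 1e-10 guard against zero variance is an addition of this sketch):
import numpy as np
def _cmvn_sketch(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    x = x.astype(np.float32).copy()
    x = x - x[:input_length].mean(axis=0)  # zero mean over the real frames
    x = x / (x[:input_length].std(axis=0) + 1e-10)  # unit variance over the real frames
    x[input_length:] = padding_value  # keep the padded tail at padding_value
    return x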
| 44 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 44 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
return sd
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=rename_keys_prefix ):
"""simple docstring"""
_lowerCamelCase : Any = OrderedDict()
_lowerCamelCase : str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_lowerCamelCase : Any = key
for name_pair in rename_keys_prefix:
_lowerCamelCase : Dict = new_key.replace(name_pair[0] , name_pair[1] )
_lowerCamelCase : Any = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_lowerCamelCase : List[str] = new_d["cls.predictions.bias"]
return new_d
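# --- Illustrative sketch (added; not part of the original file) ---
# The loop above boils down to applying the ordered (old, new) substring
# replacements to every key of a state dict:
from collections import OrderedDict
def _rename_keys_sketch(sd, pairs):
    out = OrderedDict()
    for key, value in sd.items():
        new_key = key
        for old, new in pairs:
            new_key = new_key.replace(old, new)  # e.g. 'bert.bert' -> 'visual_bert'
        out[new_key] = value
    return out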
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase : Optional[int] = "pretraining"
if "vcr" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
_lowerCamelCase : int = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
_lowerCamelCase : List[str] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase : Any = {"visual_embedding_dim": 512}
_lowerCamelCase : List[Any] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase : Tuple = {"visual_embedding_dim": 2048}
_lowerCamelCase : Dict = "vqa_advanced"
elif "vqa" in checkpoint_path:
_lowerCamelCase : Union[str, Any] = {"visual_embedding_dim": 2048, "num_labels": 3129}
_lowerCamelCase : Optional[int] = "vqa"
elif "nlvr" in checkpoint_path:
_lowerCamelCase : Tuple = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
_lowerCamelCase : Optional[Any] = "nlvr"
_lowerCamelCase : str = VisualBertConfig(**_lowerCAmelCase )
# Load State Dict
_lowerCamelCase : str = load_state_dict(_lowerCAmelCase )
_lowerCamelCase : List[str] = get_new_dict(_lowerCAmelCase , _lowerCAmelCase )
if model_type == "pretraining":
_lowerCamelCase : List[Any] = VisualBertForPreTraining(_lowerCAmelCase )
elif model_type == "vqa":
_lowerCamelCase : Dict = VisualBertForQuestionAnswering(_lowerCAmelCase )
elif model_type == "nlvr":
_lowerCamelCase : Tuple = VisualBertForVisualReasoning(_lowerCAmelCase )
elif model_type == "multichoice":
_lowerCamelCase : str = VisualBertForMultipleChoice(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
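# Example invocation (the script filename is a placeholder; paths are local):
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa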
| 44 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any=0.9_9_9 , _lowerCAmelCase : int="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCAmelCase : List[Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCAmelCase : Optional[int] ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
_lowerCamelCase : str = []
for i in range(_lowerCAmelCase ):
_lowerCamelCase : Optional[int] = i / num_diffusion_timesteps
_lowerCamelCase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCAmelCase ) / alpha_bar_fn(_lowerCAmelCase ) , _lowerCAmelCase ) )
return torch.tensor(_lowerCAmelCase , dtype=torch.float32 )
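# --- Illustrative sketch (added; not part of the original file) ---
# The relation implemented above: with alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2,
# each beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta:
import math
def _cosine_betas_sketch(num_steps: int, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta) for i in range(num_steps)]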
class UpperCAmelCase__ ( A , A ):
lowerCAmelCase_ = [e.name for e in KarrasDiffusionSchedulers]
lowerCAmelCase_ = 2
@register_to_config
def __init__( self : List[Any],__A : int = 1_0_0_0,__A : float = 0.00085,__A : float = 0.012,__A : str = "linear",__A : Optional[Union[np.ndarray, List[float]]] = None,__A : str = "epsilon",__A : str = "linspace",__A : int = 0,):
if trained_betas is not None:
_lowerCamelCase : List[str] = torch.tensor(__A,dtype=torch.float32 )
elif beta_schedule == "linear":
_lowerCamelCase : Optional[Any] = torch.linspace(__A,__A,__A,dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCamelCase : List[Any] = (
torch.linspace(beta_start**0.5,beta_end**0.5,__A,dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__A )
else:
raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
_lowerCamelCase : Dict = 1.0 - self.betas
_lowerCamelCase : List[Any] = torch.cumprod(self.alphas,dim=0 )
# set all values
self.set_timesteps(__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any],__A : Union[str, Any],__A : List[str]=None ):
if schedule_timesteps is None:
_lowerCamelCase : Tuple = self.timesteps
_lowerCamelCase : Optional[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowerCamelCase : Optional[int] = 1 if len(__A ) > 1 else 0
else:
_lowerCamelCase : Optional[Any] = timestep.cpu().item() if torch.is_tensor(__A ) else timestep
_lowerCamelCase : List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase_ ( self : Tuple ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase_ ( self : Dict,__A : torch.FloatTensor,__A : Union[float, torch.FloatTensor],):
_lowerCamelCase : Tuple = self.index_for_timestep(__A )
if self.state_in_first_order:
_lowerCamelCase : str = self.sigmas[step_index]
else:
_lowerCamelCase : Dict = self.sigmas_interpol[step_index]
_lowerCamelCase : Tuple = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase_ ( self : List[str],__A : int,__A : Union[str, torch.device] = None,__A : Optional[int] = None,):
_lowerCamelCase : int = num_inference_steps
_lowerCamelCase : Optional[int] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowerCamelCase : Optional[Any] = np.linspace(0,num_train_timesteps - 1,__A,dtype=__A )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowerCamelCase : Optional[int] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCamelCase : Tuple = (np.arange(0,__A ) * step_ratio).round()[::-1].copy().astype(__A )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowerCamelCase : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCamelCase : Union[str, Any] = (np.arange(__A,0,-step_ratio )).round().copy().astype(__A )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
_lowerCamelCase : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowerCamelCase : Dict = torch.from_numpy(np.log(__A ) ).to(__A )
_lowerCamelCase : Union[str, Any] = np.interp(__A,np.arange(0,len(__A ) ),__A )
_lowerCamelCase : Dict = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
_lowerCamelCase : Dict = torch.from_numpy(__A ).to(device=__A )
# interpolate sigmas
_lowerCamelCase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log(),0.5 ).exp()
_lowerCamelCase : Tuple = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowerCamelCase : Dict = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__A ).startswith("mps" ):
# mps does not support float64
_lowerCamelCase : List[Any] = torch.from_numpy(__A ).to(__A,dtype=torch.float32 )
else:
_lowerCamelCase : Tuple = torch.from_numpy(__A ).to(__A )
# interpolate timesteps
_lowerCamelCase : Optional[Any] = self.sigma_to_t(__A ).to(__A,dtype=timesteps.dtype )
_lowerCamelCase : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]),dim=-1 ).flatten()
_lowerCamelCase : Any = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowerCamelCase : Dict = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowerCamelCase : List[str] = defaultdict(__A )
def lowerCamelCase_ ( self : List[Any],__A : Dict ):
# get log sigma
_lowerCamelCase : Any = sigma.log()
# get distribution
_lowerCamelCase : int = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowerCamelCase : List[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowerCamelCase : Dict = low_idx + 1
_lowerCamelCase : Dict = self.log_sigmas[low_idx]
_lowerCamelCase : Dict = self.log_sigmas[high_idx]
# interpolate sigmas
_lowerCamelCase : List[str] = (low - log_sigma) / (low - high)
_lowerCamelCase : Dict = w.clamp(0,1 )
# transform interpolation to time range
_lowerCamelCase : Dict = (1 - w) * low_idx + w * high_idx
_lowerCamelCase : List[str] = t.view(sigma.shape )
return t
@property
def lowerCamelCase_ ( self : Any ):
return self.sample is None
def lowerCamelCase_ ( self : Optional[int],__A : Union[torch.FloatTensor, np.ndarray],__A : Union[float, torch.FloatTensor],__A : Union[torch.FloatTensor, np.ndarray],__A : bool = True,):
_lowerCamelCase : List[str] = self.index_for_timestep(__A )
# advance index counter by 1
_lowerCamelCase : str = timestep.cpu().item() if torch.is_tensor(__A ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowerCamelCase : int = self.sigmas[step_index]
_lowerCamelCase : List[Any] = self.sigmas_interpol[step_index + 1]
_lowerCamelCase : Dict = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowerCamelCase : Union[str, Any] = self.sigmas[step_index - 1]
_lowerCamelCase : Union[str, Any] = self.sigmas_interpol[step_index]
_lowerCamelCase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowerCamelCase : Optional[int] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCamelCase : List[str] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowerCamelCase : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCamelCase : Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowerCamelCase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowerCamelCase : List[str] = sigma_interpol - sigma_hat
# store for 2nd order step
_lowerCamelCase : Union[str, Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowerCamelCase : str = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowerCamelCase : str = sigma_next - sigma_hat
_lowerCamelCase : List[Any] = self.sample
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Optional[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def lowerCamelCase_ ( self : Any,__A : torch.FloatTensor,__A : torch.FloatTensor,__A : torch.FloatTensor,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowerCamelCase : Union[str, Any] = self.sigmas.to(device=original_samples.device,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__A ):
# mps does not support float64
_lowerCamelCase : List[Any] = self.timesteps.to(original_samples.device,dtype=torch.float32 )
_lowerCamelCase : str = timesteps.to(original_samples.device,dtype=torch.float32 )
else:
_lowerCamelCase : Tuple = self.timesteps.to(original_samples.device )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : Optional[int] = [self.index_for_timestep(__A,__A ) for t in timesteps]
_lowerCamelCase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowerCamelCase : Dict = sigma.unsqueeze(-1 )
_lowerCamelCase : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[str] ):
return self.config.num_train_timesteps
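# --- Illustrative sketch (added; not part of the original file) ---
# `add_noise` above implements the sigma-parameterized forward process
# noisy = clean + sigma * eps:
import torch
def _add_noise_sketch(clean: torch.Tensor, noise: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
    while sigma.dim() < clean.dim():
        sigma = sigma.unsqueeze(-1)  # broadcast sigma over trailing dims
    return clean + noise * sigma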
| 44 |
'''simple docstring'''
import functools
def A_ ( days : list[int] , costs : list[int] ):
    """simple docstring"""
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
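# Worked example: with travel days [1, 4, 6, 7, 8, 20] and pass costs
# [2, 7, 15] (1-day / 7-day / 30-day), the optimum is 11: a 7-day pass
# covering days 1-7 (cost 7) plus single-day passes on days 8 and 20 (2 + 2).
#   >>> A_([1, 4, 6, 7, 8, 20], [2, 7, 15])
#   11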
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | 1 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
UpperCAmelCase_ : Any = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
UpperCAmelCase_ : Union[str, Any] = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
UpperCAmelCase_ : List[str] = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ),reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],)
def lowerCamelCase_ ( self : List[Any],__A : Optional[Any],__A : Tuple,__A : Tuple=False ):
_lowerCamelCase : Any = spearmanr(__A,__A )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 44 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase )
_lowerCamelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 847
_lowerCamelCase : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_lowerCamelCase : Optional[int] = 150
_lowerCamelCase : Union[str, Any] = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_lowerCamelCase : Union[str, Any] = 171
_lowerCamelCase : str = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_lowerCamelCase : Optional[int] = 133
_lowerCamelCase : Any = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_lowerCamelCase : str = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_lowerCamelCase : List[Any] = 65
_lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json"
idalabel = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : Optional[int] = {int(k): v for k, v in idalabel.items()}
return config
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
_lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[int] = in_proj_weight[:dim, :]
_lowerCamelCase : Optional[int] = in_proj_bias[: dim]
_lowerCamelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
_lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
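# --- Illustrative sketch (added; not part of the original file) ---
# The helper above and the decoder variant below both split a fused
# (3*dim, dim) in-projection into query / key / value blocks of `dim` rows:
def _split_qkv_sketch(in_proj_weight, dim):
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : dim * 2, :]
    v = in_proj_weight[-dim:, :]
    return q, k, v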
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :]
_lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : Any = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Any = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
_lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :]
_lowerCamelCase : str = in_proj_bias[:config.hidden_size]
_lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :]
_lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : int = in_proj_weight[-hidden_size :, :]
_lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase )
# load original state_dict
with open(_lowerCAmelCase , "rb" ) as f:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase )
# load 🤗 model
_lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_lowerCAmelCase , param.shape )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
_lowerCamelCase : Any = prepare_img()
if "vistas" in model_name:
_lowerCamelCase : Any = 65
elif "cityscapes" in model_name:
_lowerCamelCase : Optional[Any] = 65535
else:
_lowerCamelCase : str = 255
_lowerCamelCase : List[str] = True if "ade" in model_name else False
_lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase )
_lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(**_lowerCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_lowerCamelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 44 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class UpperCAmelCase__ ( A ):
def __init__( self : Tuple,__A : Any=None,__A : Dict=None,*__A : Tuple,**__A : List[Any] ):
super().__init__(*__A,**__A )
if config is None:
assert isinstance(self.model,__A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f' {self.model.__class__}'
)
_lowerCamelCase : List[str] = self.model.config
else:
_lowerCamelCase : Any = config
_lowerCamelCase : Union[str, Any] = data_args
_lowerCamelCase : int = self.config.tgt_vocab_size if isinstance(self.config,__A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
" padding.." )
if self.args.label_smoothing == 0:
_lowerCamelCase : Any = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_lowerCamelCase : int = label_smoothed_nll_loss
def lowerCamelCase_ ( self : Optional[int],__A : int ):
if self.optimizer is None:
_lowerCamelCase : Any = ["bias", "LayerNorm.weight"]
_lowerCamelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_lowerCamelCase : Union[str, Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_lowerCamelCase : Any = Adafactor
_lowerCamelCase : Any = {"scale_parameter": False, "relative_step": False}
else:
_lowerCamelCase : int = AdamW
_lowerCamelCase : List[str] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_lowerCamelCase : Union[str, Any] = self.args.learning_rate
if self.sharded_ddp:
_lowerCamelCase : List[str] = OSS(
params=__A,optim=__A,**__A,)
else:
_lowerCamelCase : str = optimizer_cls(__A,**__A )
if self.lr_scheduler is None:
_lowerCamelCase : List[str] = self._get_lr_scheduler(__A )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def lowerCamelCase_ ( self : Optional[int],__A : List[Any] ):
_lowerCamelCase : Optional[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_lowerCamelCase : int = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_lowerCamelCase : Optional[int] = schedule_func(self.optimizer,num_warmup_steps=self.args.warmup_steps )
else:
_lowerCamelCase : Tuple = schedule_func(
self.optimizer,num_warmup_steps=self.args.warmup_steps,num_training_steps=__A )
return scheduler
def lowerCamelCase_ ( self : Union[str, Any] ):
if isinstance(self.train_dataset,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : Any ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_lowerCamelCase : Optional[Any] = model(**__A,use_cache=__A )[0]
_lowerCamelCase : Optional[int] = self.loss_fn(logits.view(-1,logits.shape[-1] ),labels.view(-1 ) )
else:
# compute usual loss via models
_lowerCamelCase , _lowerCamelCase : str = model(**__A,labels=__A,use_cache=__A )[:2]
else:
# compute label smoothed loss
_lowerCamelCase : int = model(**__A,use_cache=__A )[0]
_lowerCamelCase : str = torch.nn.functional.log_softmax(__A,dim=-1 )
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.loss_fn(__A,__A,self.args.label_smoothing,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase_ ( self : List[str],__A : int,__A : str ):
_lowerCamelCase : List[Any] = inputs.pop("labels" )
_lowerCamelCase , _lowerCamelCase : int = self._compute_loss(__A,__A,__A )
return loss
def lowerCamelCase_ ( self : Union[str, Any],__A : nn.Module,__A : Dict[str, Union[torch.Tensor, Any]],__A : bool,__A : Optional[List[str]] = None,):
_lowerCamelCase : List[str] = self._prepare_inputs(__A )
_lowerCamelCase : Dict = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_lowerCamelCase : Dict = self.model.generate(
inputs["input_ids"],attention_mask=inputs["attention_mask"],**__A,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_lowerCamelCase : Dict = self._pad_tensors_to_max_len(__A,gen_kwargs["max_length"] )
_lowerCamelCase : Optional[Any] = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_lowerCamelCase , _lowerCamelCase : List[Any] = self._compute_loss(__A,__A,__A )
_lowerCamelCase : List[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_lowerCamelCase : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_lowerCamelCase : Any = self._pad_tensors_to_max_len(__A,gen_kwargs["max_length"] )
return (loss, logits, labels)
def lowerCamelCase_ ( self : int,__A : Dict,__A : int ):
# If PAD token is not defined at least EOS token has to be defined
_lowerCamelCase : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f' padded to `max_length`={max_length}' )
_lowerCamelCase : Union[str, Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length),dtype=tensor.dtype,device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
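# --- Editor's illustration (not part of the original file) ---
# A minimal, self-contained sketch of the pad-to-max-length idea that
# `_pad_tensors_to_max_len` implements above: allocate a (batch, max_length)
# tensor filled with the pad id, then copy the shorter tensor into its left
# slice. Names here are illustrative, not the trainer's API.
import torch

def pad_to_max_len_demo(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
    padded[:, : tensor.shape[-1]] = tensor
    return padded

# e.g. pad_to_max_len_demo(torch.tensor([[5, 6, 7]]), 5, 0) -> tensor([[5, 6, 7, 0, 0]])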
| 44 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = range(2, 20 + 1)
UpperCAmelCase_ : str = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
_lowerCamelCase : List[str] = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
_lowerCamelCase , _lowerCamelCase : int = 0, 0
_lowerCamelCase : Dict = n - i
_lowerCamelCase : int = memo.get(_lowerCAmelCase )
if sub_memo is not None:
_lowerCamelCase : List[str] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Any = _k
break
if max_jump >= 0:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : str = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
_lowerCamelCase , _lowerCamelCase : List[Any] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : int = []
else:
_lowerCamelCase : Tuple = {c: []}
_lowerCamelCase : Any = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase , _lowerCamelCase : Optional[int] = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase , _lowerCamelCase : List[str] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
_lowerCamelCase : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(_lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(_lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : List[str] = i
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = 0, 0, 0
for j in range(len(_lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : List[str] = 0
for j in range(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = a_i[j] + addend
_lowerCamelCase , _lowerCamelCase : Any = divmod(_lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return diff, i - start_i
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : Tuple = digits[j] + addend
if s >= 10:
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(_lowerCAmelCase , 10 )
_lowerCamelCase : Any = addend // 10 + quotient
else:
_lowerCamelCase : Tuple = s
_lowerCamelCase : List[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase , _lowerCamelCase : str = divmod(_lowerCAmelCase , 10 )
digits.append(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 10**15 ):
"""simple docstring"""
_lowerCamelCase : Tuple = [1]
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : List[str] = 0
while True:
_lowerCamelCase , _lowerCamelCase : Dict = next_term(_lowerCAmelCase , 20 , i + dn , _lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_lowerCamelCase : Optional[Any] = 0
for j in range(len(_lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
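# --- Editor's illustration (not part of the original file) ---
# The memoized solver above accelerates the digit-sum sequence
# a(1) = 1, a(n+1) = a(n) + digitsum(a(n)) (this appears to be Project
# Euler #551). A naive reference version, feasible only for small n:
def naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# the sequence starts 1, 2, 4, 8, 16, 23, ...
assert [naive_a(i) for i in range(1, 7)] == [1, 2, 4, 8, 16, 23]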
| 44 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
# setable values
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : CommonSchedulerState,__A : jnp.ndarray,__A : jnp.ndarray ):
return cls(common=__A,init_noise_sigma=__A,timesteps=__A )
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A , A ):
lowerCAmelCase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase_ = 42
@property
def lowerCamelCase_ ( self : Dict ):
return True
@register_to_config
def __init__( self : List[str],__A : int = 1_0_0_0,__A : float = 0.0001,__A : float = 0.02,__A : str = "linear",__A : Optional[jnp.ndarray] = None,__A : str = "fixed_small",__A : bool = True,__A : str = "epsilon",__A : jnp.dtype = jnp.floataa,):
_lowerCamelCase : str = dtype
def lowerCamelCase_ ( self : Any,__A : Optional[CommonSchedulerState] = None ):
if common is None:
_lowerCamelCase : Any = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = jnp.array(1.0,dtype=self.dtype )
_lowerCamelCase : Tuple = jnp.arange(0,self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__A,init_noise_sigma=__A,timesteps=__A,)
def lowerCamelCase_ ( self : Tuple,__A : DDPMSchedulerState,__A : jnp.ndarray,__A : Optional[int] = None ):
return sample
def lowerCamelCase_ ( self : List[Any],__A : DDPMSchedulerState,__A : int,__A : Tuple = () ):
_lowerCamelCase : int = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
_lowerCamelCase : str = (jnp.arange(0,__A ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__A,timesteps=__A,)
def lowerCamelCase_ ( self : Any,__A : DDPMSchedulerState,__A : Optional[int],__A : int=None,__A : str=None ):
_lowerCamelCase : List[str] = state.common.alphas_cumprod[t]
_lowerCamelCase : Tuple = jnp.where(t > 0,state.common.alphas_cumprod[t - 1],jnp.array(1.0,dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_lowerCamelCase : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_lowerCamelCase : List[str] = jnp.clip(__A,a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_lowerCamelCase : Dict = jnp.log(jnp.clip(__A,a_min=1e-20 ) )
elif variance_type == "fixed_large":
_lowerCamelCase : Dict = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_lowerCamelCase : Optional[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_lowerCamelCase : Optional[int] = variance
_lowerCamelCase : str = state.common.betas[t]
_lowerCamelCase : Optional[Any] = (predicted_variance + 1) / 2
_lowerCamelCase : Tuple = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase_ ( self : int,__A : DDPMSchedulerState,__A : jnp.ndarray,__A : int,__A : jnp.ndarray,__A : Optional[jax.random.KeyArray] = None,__A : bool = True,):
_lowerCamelCase : int = timestep
if key is None:
_lowerCamelCase : Dict = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_lowerCamelCase , _lowerCamelCase : Optional[int] = jnp.split(__A,sample.shape[1],axis=1 )
else:
_lowerCamelCase : Optional[Any] = None
# 1. compute alphas, betas
_lowerCamelCase : Optional[int] = state.common.alphas_cumprod[t]
_lowerCamelCase : List[str] = jnp.where(t > 0,state.common.alphas_cumprod[t - 1],jnp.array(1.0,dtype=self.dtype ) )
_lowerCamelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCamelCase : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : str = model_output
elif self.config.prediction_type == "v_prediction":
_lowerCamelCase : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                " or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : str = jnp.clip(__A,-1,1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_lowerCamelCase : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_lowerCamelCase : Any = jax.random.split(__A,num=1 )
_lowerCamelCase : List[str] = jax.random.normal(__A,shape=model_output.shape,dtype=self.dtype )
return (self._get_variance(__A,__A,predicted_variance=__A ) ** 0.5) * noise
_lowerCamelCase : Optional[int] = jnp.where(t > 0,random_variance(),jnp.zeros(model_output.shape,dtype=self.dtype ) )
_lowerCamelCase : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__A,state=__A )
def lowerCamelCase_ ( self : Union[str, Any],__A : DDPMSchedulerState,__A : jnp.ndarray,__A : jnp.ndarray,__A : jnp.ndarray,):
return add_noise_common(state.common,__A,__A,__A )
def lowerCamelCase_ ( self : Dict,__A : DDPMSchedulerState,__A : jnp.ndarray,__A : jnp.ndarray,__A : jnp.ndarray,):
return get_velocity_common(state.common,__A,__A,__A )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
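# --- Editor's illustration (not part of the original file) ---
# Hedged sketch of the posterior-mean computation that `step` performs above
# (Eq. 7 of https://arxiv.org/pdf/2006.11239.pdf): the mean of x_{t-1} is a
# weighted mix of the predicted x_0 and the current sample x_t. Plain floats
# stand in for jnp arrays; names are illustrative.
def ddpm_posterior_mean(x_t, pred_x0, alpha_t, alpha_prod_t, alpha_prod_t_prev, beta_t):
    beta_prod_t = 1.0 - alpha_prod_t
    coef_x0 = (alpha_prod_t_prev ** 0.5 * beta_t) / beta_prod_t       # weight on "predicted x_0"
    coef_xt = (alpha_t ** 0.5) * (1.0 - alpha_prod_t_prev) / beta_prod_t  # weight on current sample
    return coef_x0 * pred_x0 + coef_xt * x_t

# e.g. with scalar schedule values: ddpm_posterior_mean(0.5, 0.0, 0.99, 0.9, 0.95, 0.01)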
| 44 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the encoder.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowerCAmelCase_ = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Source language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'Target language id for translation.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': '# num_beams to use for evaluation.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , F'{split}_results.json' ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
check_output_dir(_lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : Tuple = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
assert hasattr(_lowerCAmelCase , _lowerCAmelCase ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCamelCase : List[Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Any = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCamelCase : int = SeqaSeqDataset
# Get datasets
_lowerCamelCase : Tuple = (
dataset_class(
_lowerCAmelCase , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowerCamelCase : List[Any] = (
dataset_class(
_lowerCAmelCase , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCamelCase : Optional[int] = (
dataset_class(
_lowerCAmelCase , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCamelCase : int = (
build_compute_metrics_fn(data_args.task , _lowerCAmelCase ) if training_args.predict_with_generate else None
)
_lowerCamelCase : List[Any] = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , data_args=_lowerCAmelCase , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , data_collator=SeqaSeqDataCollator(
_lowerCAmelCase , _lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowerCamelCase : Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCamelCase : int = train_result.metrics
_lowerCamelCase : Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="val" )
_lowerCamelCase : Dict = data_args.n_val
_lowerCamelCase : List[Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowerCamelCase : Any = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix="test" )
_lowerCamelCase : Dict = test_output.metrics
_lowerCamelCase : Optional[int] = data_args.n_test
if trainer.is_world_process_zero():
_lowerCamelCase : int = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
_lowerCamelCase : List[str] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
_lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
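# --- Editor's illustration (not part of the original file) ---
# Hedged sketch of the HfArgumentParser pattern main() relies on above:
# several dataclasses are filled from the command line (or one .json file)
# in a single call. DemoArgs is an invented stand-in, not this script's
# actual argument dataclasses.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    model_name_or_path: str = field(default="t5-small")
    n_train: int = field(default=-1)

(demo_args,) = HfArgumentParser(DemoArgs).parse_args_into_dataclasses(args=["--n_train", "100"])
assert demo_args.n_train == 100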
| 44 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'Salesforce/blip-image-captioning-base'
lowerCAmelCase_ = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
lowerCAmelCase_ = 'image_captioner'
lowerCAmelCase_ = AutoModelForVisionaSeq
lowerCAmelCase_ = ['image']
lowerCAmelCase_ = ['text']
def __init__( self : Optional[int],*__A : List[str],**__A : Optional[int] ):
requires_backends(self,["vision"] )
super().__init__(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : "Image" ):
return self.pre_processor(images=__A,return_tensors="pt" )
def lowerCamelCase_ ( self : Any,__A : Dict ):
return self.model.generate(**__A )
def lowerCamelCase_ ( self : int,__A : List[str] ):
return self.pre_processor.batch_decode(__A,skip_special_tokens=__A )[0].strip()
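# --- Editor's illustration (not part of the original file) ---
# Hedged usage sketch: PipelineTool subclasses are callable, running
# encode -> forward -> decode in sequence. Guarded because it downloads the
# BLIP weights on first use; the image path is an assumption.
if __name__ == "__main__":
    from PIL import Image
    captioner = UpperCAmelCase__()  # the captioner class defined above
    print(captioner(Image.open("photo.jpg")))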
| 44 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : str,__A : List[str]=1_3,__A : str=3_2,__A : Tuple=2,__A : Any=3,__A : Dict=1_6,__A : Dict=[3_2, 6_4, 1_2_8],__A : List[str]=[1, 2, 1],__A : str=[2, 2, 4],__A : Optional[int]=2,__A : Dict=2.0,__A : str=True,__A : Tuple=0.0,__A : int=0.0,__A : List[str]=0.1,__A : Any="gelu",__A : List[Any]=False,__A : Optional[Any]=True,__A : List[str]=0.02,__A : Tuple=1e-5,__A : Any=True,__A : Tuple=None,__A : Tuple=True,__A : Tuple=1_0,__A : List[Any]=8,__A : Optional[int]=["stage1", "stage2"],__A : int=[1, 2],):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = image_size
_lowerCamelCase : int = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : int = embed_dim
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Any = num_heads
_lowerCamelCase : List[str] = window_size
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Any = qkv_bias
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = use_absolute_embeddings
_lowerCamelCase : List[Any] = patch_norm
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : Tuple = scope
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Tuple = encoder_stride
_lowerCamelCase : Any = out_features
_lowerCamelCase : Any = out_indices
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Union[str, Any] ):
return FocalNetConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,embed_dim=self.embed_dim,hidden_sizes=self.hidden_sizes,depths=self.depths,num_heads=self.num_heads,window_size=self.window_size,mlp_ratio=self.mlp_ratio,qkv_bias=self.qkv_bias,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,drop_path_rate=self.drop_path_rate,hidden_act=self.hidden_act,use_absolute_embeddings=self.use_absolute_embeddings,path_norm=self.patch_norm,layer_norm_eps=self.layer_norm_eps,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,out_features=self.out_features,out_indices=self.out_indices,)
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Tuple,__A : List[Any] ):
_lowerCamelCase : Optional[Any] = FocalNetModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
_lowerCamelCase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCamelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, expected_seq_len, expected_dim) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : int,__A : Optional[int] ):
_lowerCamelCase : Any = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = FocalNetBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Dict ):
_lowerCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Dict = 1
_lowerCamelCase : Any = FocalNetForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any],__A : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : str = FocalNetForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = FocalNetModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,embed_dim=3_7,has_text_modality=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : str = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
_lowerCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : Union[str, Any] = model_class(__A )
_lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : int = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple,__A : Any,__A : List[Any],__A : str,__A : Any ):
_lowerCamelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.hidden_states
_lowerCamelCase : int = getattr(
self.model_tester,"expected_num_hidden_layers",len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__A ),__A )
# FocalNet has a different seq_length
_lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
_lowerCamelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(__A ),__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
_lowerCamelCase : List[str] = (
reshaped_hidden_states[0].view(__A,__A,height * width ).permute(0,2,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ),[num_patches, self.model_tester.embed_dim],)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCamelCase : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCamelCase : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCamelCase : List[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Optional[Any] = True
self.check_hidden_states_output(__A,__A,__A,(padded_height, padded_width) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = FocalNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__A )
# verify the logits
_lowerCamelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item(),2_8_1 )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = FocalNetConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FocalNetModelTester(self )
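# --- Editor's illustration (not part of the original file) ---
# Worked arithmetic behind the shape assertion in create_and_check_model
# above: with the tester defaults (image_size=32, patch_size=2, embed_dim=16,
# depths=[1, 2, 1]) the final hidden state has
# ((32 // 2) ** 2) // 4 ** (3 - 1) = 16 tokens of width 16 * 2 ** 2 = 64.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = embed_dim * 2 ** (len(depths) - 1)
assert (expected_seq_len, expected_dim) == (16, 64)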
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = list(_lowerCAmelCase )
_lowerCamelCase : List[Any] = list(_lowerCAmelCase )
_lowerCamelCase : List[str] = 0
for i in range(len(_lowerCAmelCase ) ):
        if lista[i] != listb[i]:
count += 1
_lowerCamelCase : Dict = "_"
if count > 1:
return False
else:
return "".join(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
while True:
_lowerCamelCase : Tuple = ["$"] * len(_lowerCAmelCase )
_lowerCamelCase : Tuple = []
for i in range(len(_lowerCAmelCase ) ):
for j in range(i + 1 , len(_lowerCAmelCase ) ):
_lowerCamelCase : Dict = compare_string(binary[i] , binary[j] )
                if k is not False:
                    _lowerCamelCase : Optional[int] = "*"
                    _lowerCamelCase : Any = "*"
                    temp.append(k )
for i in range(len(_lowerCAmelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCAmelCase ) == 0:
return pi
_lowerCamelCase : int = list(set(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Sequence[float] ):
"""simple docstring"""
_lowerCamelCase : Tuple = []
for minterm in minterms:
_lowerCamelCase : List[Any] = ""
for _ in range(_lowerCAmelCase ):
_lowerCamelCase : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCAmelCase )
return temp
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = list(_lowerCAmelCase )
_lowerCamelCase : Any = list(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = 0
for i in range(len(_lowerCAmelCase ) ):
        if lista[i] != listb[i]:
count_n += 1
return count_n == count
def A_ ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : list[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
_lowerCamelCase : str = [0] * len(_lowerCAmelCase )
for i in range(len(chart[0] ) ):
_lowerCamelCase : int = 0
_lowerCamelCase : Union[str, Any] = -1
for j in range(len(_lowerCAmelCase ) ):
if chart[j][i] == 1:
count += 1
_lowerCamelCase : Optional[Any] = j
if count == 1:
_lowerCamelCase : List[Any] = 1
for i in range(len(_lowerCAmelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Optional[Any] = 0
temp.append(prime_implicants[i] )
while True:
_lowerCamelCase : int = 0
_lowerCamelCase : int = -1
_lowerCamelCase : Union[str, Any] = 0
for i in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : List[str] = chart[i].count(1 )
if count_n > max_n:
_lowerCamelCase : List[Any] = count_n
_lowerCamelCase : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : int = 0
def A_ ( _lowerCAmelCase : list[str] , _lowerCAmelCase : list[str] ):
"""simple docstring"""
_lowerCamelCase : str = [[0 for x in range(len(_lowerCAmelCase ) )] for x in range(len(_lowerCAmelCase ) )]
for i in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : str = prime_implicants[i].count("_" )
for j in range(len(_lowerCAmelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCAmelCase ):
_lowerCamelCase : int = 1
return chart
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = int(input("Enter the no. of variables\n" ) )
_lowerCamelCase : Tuple = [
        float(x )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
_lowerCamelCase : Dict = decimal_to_binary(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : str = check(_lowerCAmelCase )
print("Prime Implicants are:" )
print(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = prime_implicant_chart(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Any = selection(_lowerCAmelCase , _lowerCAmelCase )
print("Essential Prime Implicants are:" )
print(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
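# --- Editor's illustration (not part of the original file) ---
# Standalone check of the Quine-McCluskey merging rule that compare_string
# implements above: two implicants combine iff they differ in exactly one
# bit, and the differing position becomes a don't-care "_".
def combine_demo(a: str, b: str):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return False
    return a[: diff[0]] + "_" + a[diff[0] + 1 :]

assert combine_demo("0010", "0110") == "0_10"
assert combine_demo("0010", "0101") is False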
| 44 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any,__A : Any,__A : Any ):
_lowerCamelCase : List[Any] = name
_lowerCamelCase : Union[str, Any] = value
_lowerCamelCase : str = weight
def __repr__( self : Any ):
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase_ ( self : Optional[int] ):
return self.value
def lowerCamelCase_ ( self : Any ):
return self.name
def lowerCamelCase_ ( self : List[Any] ):
return self.weight
def lowerCamelCase_ ( self : str ):
return self.value / self.weight
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
_lowerCamelCase , _lowerCamelCase : Optional[int] = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
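# --- Editor's illustration (not part of the original file) ---
# Worked run of the greedy strategy implemented above: sort by the supplied
# key (here the value-to-weight ratio) and take items while they still fit
# the remaining budget.
names, values, weights, max_cost = ["a", "b", "c"], [60, 100, 120], [10, 20, 30], 50
order = sorted(range(len(names)), key=lambda i: values[i] / weights[i], reverse=True)
taken, total_cost, total_value = [], 0.0, 0.0
for i in order:
    if total_cost + weights[i] <= max_cost:
        taken.append(names[i])
        total_cost += weights[i]
        total_value += values[i]
assert (taken, total_value) == (["a", "b"], 160.0)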
| 44 | 1 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
UpperCAmelCase_ : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase_ : Optional[Any] = logging.getLogger()
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
_lowerCamelCase : Tuple = parser.parse_args()
return args.f
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]="eval" ):
"""simple docstring"""
_lowerCamelCase : Any = os.path.join(_lowerCAmelCase , F'{split}_results.json' )
if os.path.exists(_lowerCAmelCase ):
with open(_lowerCAmelCase , "r" ) as f:
return json.load(_lowerCAmelCase )
raise ValueError(F'can\'t find {path}' )
UpperCAmelCase_ : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase__ ( A ):
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Optional[Any] = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(__A,"argv",__A ):
run_flax_glue.main()
_lowerCamelCase : Optional[int] = get_results(__A )
self.assertGreaterEqual(result["eval_accuracy"],0.75 )
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Union[str, Any] = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(__A,"argv",__A ):
run_clm_flax.main()
_lowerCamelCase : Union[str, Any] = get_results(__A )
self.assertLess(result["eval_perplexity"],1_0_0 )
@slow
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Any = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(__A,"argv",__A ):
run_summarization_flax.main()
_lowerCamelCase : List[str] = get_results(__A,split="test" )
self.assertGreaterEqual(result["test_rouge1"],1_0 )
self.assertGreaterEqual(result["test_rouge2"],2 )
self.assertGreaterEqual(result["test_rougeL"],7 )
self.assertGreaterEqual(result["test_rougeLsum"],7 )
@slow
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Any = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(__A,"argv",__A ):
run_mlm_flax.main()
_lowerCamelCase : List[str] = get_results(__A )
self.assertLess(result["eval_perplexity"],4_2 )
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : str = self.get_auto_remove_tmp_dir()
_lowerCamelCase : str = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(__A,"argv",__A ):
run_ta_mlm_flax.main()
_lowerCamelCase : List[Any] = get_results(__A )
self.assertGreaterEqual(result["eval_accuracy"],0.42 )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_lowerCamelCase : Optional[Any] = 7 if get_gpu_count() > 1 else 2
_lowerCamelCase : int = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Any = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(__A,"argv",__A ):
run_flax_ner.main()
_lowerCamelCase : str = get_results(__A )
self.assertGreaterEqual(result["eval_accuracy"],0.75 )
self.assertGreaterEqual(result["eval_f1"],0.3 )
@slow
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : str = self.get_auto_remove_tmp_dir()
_lowerCamelCase : str = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(__A,"argv",__A ):
run_qa.main()
_lowerCamelCase : str = get_results(__A )
self.assertGreaterEqual(result["eval_f1"],3_0 )
self.assertGreaterEqual(result["eval_exact"],3_0 )
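# --- Editor's illustration (not part of the original file) ---
# The pattern every test above relies on: each example script's main() reads
# sys.argv, so the test temporarily swaps argv in with unittest.mock.patch.
import sys
from unittest.mock import patch

def demo_main():
    return sys.argv[1:]

with patch.object(sys, "argv", ["prog", "--seed", "42"]):
    assert demo_main() == ["--seed", "42"]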
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
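# --- Editor's illustration (not part of the original file) ---
# Hedged sketch of the _LazyModule idea used above: attribute access, not
# import time, triggers the real import, which keeps `import transformers`
# cheap. LazyDemo is an invented stand-in, not the actual _LazyModule API.
import importlib

class LazyDemo:
    def __init__(self, mapping):
        self._mapping = mapping  # attribute name -> module path

    def __getattr__(self, name):
        module = importlib.import_module(self._mapping[name])
        return getattr(module, name)

assert LazyDemo({"sqrt": "math"}).sqrt(9.0) == 3.0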
| 44 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = original_name.split("." )[0]
_lowerCamelCase : List[Any] = key.split("." )
_lowerCamelCase : Dict = int(key_list[key_list.index(_lowerCAmelCase ) - 2] )
_lowerCamelCase : Union[str, Any] = int(key_list[key_list.index(_lowerCAmelCase ) - 1] )
_lowerCamelCase : Optional[int] = orig_block_num - offset
_lowerCamelCase : Dict = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = OrderedDict()
_lowerCamelCase , _lowerCamelCase : Tuple = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
_lowerCamelCase : List[str] = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
_lowerCamelCase : Dict = key[: key.find("proj" )]
_lowerCamelCase : int = key.replace(_lowerCAmelCase , F'patch_embeddings.{total_embed_found}.' )
_lowerCamelCase : Tuple = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
_lowerCamelCase : List[Any] = "poolformer.encoder." + key
if "mlp.fc1" in key:
_lowerCamelCase : Optional[int] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
_lowerCamelCase : str = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
_lowerCamelCase : int = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "norm1" , "before_norm" )
if "norm2" in key:
_lowerCamelCase : int = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "norm2" , "after_norm" )
if "layer_scale_1" in key:
_lowerCamelCase : str = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
_lowerCamelCase : Optional[int] = replace_key_with_offset(_lowerCAmelCase , _lowerCAmelCase , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
_lowerCamelCase : Tuple = key.replace("head" , "classifier" )
_lowerCamelCase : Tuple = value
return new_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return image
@torch.no_grad()
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = PoolFormerConfig()
# set attributes based on model_name
_lowerCamelCase : Optional[int] = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = model_name[-3:]
_lowerCamelCase : str = 1000
_lowerCamelCase : Union[str, Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : Optional[Any] = (1, 1000)
# set config attributes
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Tuple = idalabel
_lowerCamelCase : int = {v: k for k, v in idalabel.items()}
if size == "s12":
_lowerCamelCase : List[Any] = [2, 2, 6, 2]
_lowerCamelCase : Optional[int] = [64, 128, 320, 512]
_lowerCamelCase : Any = 4.0
_lowerCamelCase : int = 0.9
elif size == "s24":
_lowerCamelCase : List[str] = [4, 4, 12, 4]
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : Union[str, Any] = 4.0
_lowerCamelCase : Dict = 0.9
elif size == "s36":
_lowerCamelCase : List[str] = [6, 6, 18, 6]
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : Optional[Any] = 4.0
_lowerCamelCase : Optional[int] = 1E-6
_lowerCamelCase : Union[str, Any] = 0.9
elif size == "m36":
_lowerCamelCase : Optional[Any] = [6, 6, 18, 6]
_lowerCamelCase : Dict = [96, 192, 384, 768]
_lowerCamelCase : Optional[Any] = 4.0
_lowerCamelCase : Union[str, Any] = 1E-6
_lowerCamelCase : Tuple = 0.9_5
elif size == "m48":
_lowerCamelCase : Optional[Any] = [8, 8, 24, 8]
_lowerCamelCase : Optional[Any] = [96, 192, 384, 768]
_lowerCamelCase : List[str] = 4.0
_lowerCamelCase : Union[str, Any] = 1E-6
_lowerCamelCase : str = 0.9_5
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
_lowerCamelCase : Union[str, Any] = PoolFormerImageProcessor(crop_pct=_lowerCAmelCase )
# Prepare image
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : List[str] = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location=torch.device("cpu" ) )
# rename keys
_lowerCamelCase : Dict = rename_keys(_lowerCAmelCase )
# create HuggingFace model and load state dict
_lowerCamelCase : Optional[Any] = PoolFormerForImageClassification(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Define image processor
_lowerCamelCase : Optional[Any] = PoolFormerImageProcessor(crop_pct=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
_lowerCamelCase : Any = outputs.logits
# define expected logit slices for different models
if size == "s12":
_lowerCamelCase : Tuple = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
_lowerCamelCase : Tuple = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
_lowerCamelCase : Tuple = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
_lowerCamelCase : Dict = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
_lowerCamelCase : str = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44 |
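A standalone trace of the block-renumbering helper in the PoolFormer conversion above: it locates the block and layer indices two and one positions before the matched pattern, then rewrites the key with the offset subtracted. The key and offset values here are made up for illustration:

original_name, new_name, offset = "mlp.fc1", "output.conv1", 1
key = "poolformer.encoder.2.1.mlp.fc1.weight"
key_list = key.split(".")
idx = key_list.index(original_name.split(".")[0])
orig_block_num, layer_num = int(key_list[idx - 2]), int(key_list[idx - 1])
new_key = key.replace(
    f"{orig_block_num}.{layer_num}.{original_name}",
    f"block.{orig_block_num - offset}.{layer_num}.{new_name}",
)
print(new_key)  # poolformer.encoder.block.1.1.output.conv1.weight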
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
    _lowerCamelCase : Optional[int] = textwrap.dedent(
        "            header1,header2\n            1,2\n            10,20\n            " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
    _lowerCamelCase : Any = textwrap.dedent(
        "            header1,header2\n            1,2\n            10,20,\n            " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
    _lowerCamelCase : int = textwrap.dedent(
        F'            image\n            {image_file}\n            ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
    _lowerCamelCase : int = textwrap.dedent(
        "            label\n            good\n            bad\n            good\n            " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
    _lowerCamelCase : Any = textwrap.dedent(
        "            int_list\n            1 2 3\n            4 5 6\n            7 8 9\n            " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
    _lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x : [int(i ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 | 1 |
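The `converters` test above ultimately exercises pandas' CSV parsing, which the datasets `Csv` builder wraps. A minimal sketch of the same column transform with pandas alone (assuming pandas is installed; the data is the fixture's):

import io
import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n"
df = pd.read_csv(io.StringIO(csv_text), converters={"int_list": lambda x: [int(i) for i in x.split()]})
print(df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6]]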
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(_lowerCAmelCase , x % y )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return (x * y) // greatest_common_divisor(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : int = 20 ):
"""simple docstring"""
_lowerCamelCase : Tuple = 1
for i in range(1 , n + 1 ):
_lowerCamelCase : Any = lcm(_lowerCAmelCase , _lowerCAmelCase )
return g
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 |
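A quick cross-check of the LCM solution above with the standard library (`math.lcm` needs Python 3.9+); both approaches give 232792560 for n = 20:

import math
from functools import reduce

print(reduce(math.lcm, range(1, 21)))  # 232792560, the smallest number divisible by 1..20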
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase_ ( self : List[str] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : List[str] = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[Any] = floats_tensor((1, 3, 1_6, 1_6),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Any = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Tuple = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(__A ) ).to(__A )
_lowerCamelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def lowerCamelCase_ ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
def lowerCamelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCamelCase_ ( self : Any ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase_ ( self : Dict ):
self._test_save_load_local()
def lowerCamelCase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,)
| 44 | 1 |
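The device-dependent generator setup in the dummy-input helper above is a common diffusers-test idiom, presumably because device-local `torch.Generator` objects are not supported on MPS in the torch versions these tests target. A small self-contained sketch of that branch:

import torch

def make_generator(device, seed=0):
    # on mps, fall back to seeding the global RNG instead of a device generator
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

noise = torch.randn((1, 3, 32, 32), generator=make_generator("cpu"))
print(noise.shape)  # torch.Size([1, 3, 32, 32])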
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'codegen'
lowerCAmelCase_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Dict,__A : Optional[int]=5_0_4_0_0,__A : Optional[int]=2_0_4_8,__A : Optional[Any]=2_0_4_8,__A : Optional[int]=4_0_9_6,__A : List[Any]=2_8,__A : Optional[Any]=1_6,__A : int=6_4,__A : str=None,__A : Dict="gelu_new",__A : Tuple=0.0,__A : List[Any]=0.0,__A : Dict=0.0,__A : List[Any]=1e-5,__A : List[str]=0.02,__A : Tuple=True,__A : List[str]=5_0_2_5_6,__A : Optional[int]=5_0_2_5_6,__A : int=False,**__A : List[Any],):
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : str = n_ctx
_lowerCamelCase : Optional[int] = n_positions
_lowerCamelCase : Any = n_embd
_lowerCamelCase : Dict = n_layer
_lowerCamelCase : Tuple = n_head
_lowerCamelCase : str = n_inner
_lowerCamelCase : List[str] = rotary_dim
_lowerCamelCase : str = activation_function
_lowerCamelCase : Any = resid_pdrop
_lowerCamelCase : List[Any] = embd_pdrop
_lowerCamelCase : Optional[Any] = attn_pdrop
_lowerCamelCase : Optional[int] = layer_norm_epsilon
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Dict = use_cache
_lowerCamelCase : str = bos_token_id
_lowerCamelCase : Union[str, Any] = eos_token_id
super().__init__(
bos_token_id=__A,eos_token_id=__A,tie_word_embeddings=__A,**__A )
class UpperCAmelCase__ ( A ):
def __init__( self : Any,__A : PretrainedConfig,__A : str = "default",__A : List[PatchingSpec] = None,__A : bool = False,):
super().__init__(__A,task=__A,patching_specs=__A,use_past=__A )
if not getattr(self._config,"pad_token_id",__A ):
# TODO: how to do that better?
_lowerCamelCase : Optional[Any] = 0
@property
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__A,direction="inputs" )
_lowerCamelCase : List[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCamelCase : Optional[int] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCamelCase_ ( self : int ):
return self._config.n_layer
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return self._config.n_head
def lowerCamelCase_ ( self : Any,__A : PreTrainedTokenizer,__A : int = -1,__A : int = -1,__A : bool = False,__A : Optional[TensorType] = None,):
_lowerCamelCase : Any = super(__A,self ).generate_dummy_inputs(
__A,batch_size=__A,seq_length=__A,is_pair=__A,framework=__A )
        # We need to order the inputs in the way they appear in the forward()
_lowerCamelCase : Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCamelCase , _lowerCamelCase : Any = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCamelCase : List[Any] = seqlen + 2
_lowerCamelCase : Dict = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCamelCase : Dict = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
_lowerCamelCase : Dict = common_inputs["attention_mask"]
if self.use_past:
_lowerCamelCase : Optional[int] = ordered_inputs["attention_mask"].dtype
_lowerCamelCase : str = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__A,__A,dtype=__A )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase_ ( self : Tuple ):
return 1_3
| 44 |
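The shape bookkeeping behind the dummy `past_key_values` in the ONNX config above, sketched with made-up config numbers (batch=2, 16 heads, sequence length 7, hidden size 4096, 3 layers):

import torch

batch, num_heads, seqlen, hidden_size, num_layers = 2, 16, 7, 4096, 3
past_shape = (batch, num_heads, seqlen + 2, hidden_size // num_heads)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 9, 256])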
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : Tuple,__A : Optional[int],__A : Optional[int]=1_0_2_4,__A : int=1_0_2_4,__A : Any=3.6 ):
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = dataset
_lowerCamelCase : Any = seq_length
_lowerCamelCase : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self : Tuple ):
_lowerCamelCase : Union[str, Any] = iter(self.dataset )
_lowerCamelCase : str = True
while more_examples:
_lowerCamelCase , _lowerCamelCase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : int = tokenizer(__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0,len(__A ),self.seq_length ):
_lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length]
if len(__A ) == self.seq_length:
yield torch.tensor(__A )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {"streaming": True}
_lowerCamelCase : Optional[Any] = load_dataset(args.dataset_name , split="train" , **_lowerCAmelCase )
_lowerCamelCase : int = ConstantLengthDataset(_lowerCAmelCase , _lowerCAmelCase , seq_length=args.seq_length )
_lowerCamelCase : Dict = DataLoader(_lowerCAmelCase , batch_size=args.batch_size )
return eval_dataloader
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
model.eval()
_lowerCamelCase : Optional[int] = []
for step, batch in enumerate(_lowerCAmelCase ):
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
_lowerCamelCase : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(_lowerCAmelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowerCamelCase : Dict = torch.mean(torch.cat(_lowerCAmelCase ) )
try:
_lowerCamelCase : List[Any] = torch.exp(_lowerCAmelCase )
except OverflowError:
_lowerCamelCase : Optional[int] = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase_ : List[str] = Accelerator()
# Parse configuration
UpperCAmelCase_ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase_ : Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase_ : int = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase_, UpperCAmelCase_ : Dict = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
UpperCAmelCase_, UpperCAmelCase_ : str = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 44 | 1 |
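The packing logic inside `__iter__` above, reduced to plain lists: tokenized texts are joined with a separator token, sliced into fixed-length windows, and any short tail is dropped. The token ids below are invented:

concat_token_id, seq_length = 0, 4
tokenized_inputs = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]
all_token_ids = []
for t in tokenized_inputs:
    all_token_ids.extend(t + [concat_token_id])  # separator after every document
chunks = [all_token_ids[i : i + seq_length] for i in range(0, len(all_token_ids), seq_length)]
chunks = [c for c in chunks if len(c) == seq_length]  # drop the incomplete tail
print(chunks)  # [[1, 2, 3, 0], [4, 5, 0, 6], [7, 8, 9, 0]]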
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def A_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
"""simple docstring"""
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(_lowerCAmelCase , _lowerCAmelCase ) ) )
def A_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
"""simple docstring"""
if dataset.ndim != value_array.ndim:
_lowerCamelCase : Tuple = (
"Wrong input data's dimensions... "
F'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
)
raise ValueError(_lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_lowerCamelCase : Tuple = (
"Wrong input data's shape... "
F'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
)
raise ValueError(_lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_lowerCamelCase : List[str] = (
"Input data have different datatype... "
F'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
)
raise TypeError(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
for value in value_array:
_lowerCamelCase : Optional[int] = euclidean(_lowerCAmelCase , dataset[0] )
_lowerCamelCase : Union[str, Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
_lowerCamelCase : int = euclidean(_lowerCAmelCase , _lowerCAmelCase )
if dist > temp_dist:
_lowerCamelCase : int = temp_dist
_lowerCamelCase : Union[str, Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def A_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
"""simple docstring"""
return np.dot(_lowerCAmelCase , _lowerCAmelCase ) / (norm(_lowerCAmelCase ) * norm(_lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
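A tiny numpy walk-through of the brute-force nearest-neighbour loop above (the data values are invented):

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [3.0, 3.0]])
value = np.array([0.9, 1.2])
distances = [float(np.sqrt(((row - value) ** 2).sum())) for row in dataset]
best = int(np.argmin(distances))
print(dataset[best].tolist(), round(distances[best], 4))  # [1.0, 1.0] 0.2236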
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],__A : List[Any]=None,__A : str=None,__A : str=None,__A : Optional[int]="replace",__A : Union[str, Any]="<s>",__A : Union[str, Any]="</s>",__A : Any="</s>",__A : Optional[int]="<s>",__A : List[str]="<unk>",__A : str="<pad>",__A : Tuple="<mask>",__A : Union[str, Any]=False,__A : Optional[int]=True,**__A : Optional[int],):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Tuple = pre_tok_class(**__A )
_lowerCamelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : List[str] = "post_processor"
_lowerCamelCase : int = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : str = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : Dict = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : List[Any] = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : List[str] = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Any = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : str ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : str ):
_lowerCamelCase : Optional[Any] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : str = value
def lowerCamelCase_ ( self : List[str],*__A : List[Any],**__A : int ):
_lowerCamelCase : List[str] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : Union[str, Any] ):
_lowerCamelCase : List[Any] = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : List[str] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : List[str]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Any,__A : Union[Dict[str, EncodedInput], BatchEncoding],__A : Optional[int] = None,__A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,__A : Optional[int] = None,__A : Optional[bool] = None,):
_lowerCamelCase : List[str] = super()._pad(
encoded_inputs=__A,max_length=__A,padding_strategy=__A,pad_to_multiple_of=__A,return_attention_mask=__A,)
# Load from model defaults
if return_attention_mask is None:
_lowerCamelCase : Any = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_lowerCamelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
_lowerCamelCase : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
_lowerCamelCase : str = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_lowerCamelCase : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_lowerCamelCase : int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 44 | 1 |
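Why `_pad` above fills `global_attention_mask` with -1 rather than 0: as the code comments note, 0 in that mask already means "local attention", so padding needs a distinct sentinel. A minimal illustration of the right-padding branch:

global_attention_mask = [1, 0, 0]  # 1 = token attends globally, 0 = locally
difference = 5 - len(global_attention_mask)  # pad to length 5, padding side "right"
print(global_attention_mask + [-1] * difference)  # [1, 0, 0, -1, -1]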
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=False , _lowerCAmelCase : Any=False , _lowerCAmelCase : Dict=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
_lowerCamelCase : str = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Any = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Tuple = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Dict = in_proj_bias[: config.hidden_size]
_lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : int = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : str = val
@torch.no_grad()
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_lowerCAmelCase )
_lowerCamelCase : List[str] = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : List[str] = False
if "vqa" in checkpoint_url:
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Tuple = 3129
_lowerCamelCase : Tuple = "huggingface/label-files"
_lowerCamelCase : List[str] = "vqa2-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : List[Any] = idalabel
_lowerCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Optional[int] = ViltForQuestionAnswering(_lowerCAmelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase : int = True
_lowerCamelCase : Dict = 2
_lowerCamelCase : Tuple = {0: "False", 1: "True"}
_lowerCamelCase : Any = {v: k for k, v in config.idalabel.items()}
_lowerCamelCase : Dict = 3
_lowerCamelCase : Dict = ViltForImagesAndTextClassification(_lowerCAmelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Tuple = ViltForImageAndTextRetrieval(_lowerCAmelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Dict = ViltForMaskedLM(_lowerCAmelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Tuple = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["state_dict"]
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
if mlm_model or irtr_model:
_lowerCamelCase : int = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCamelCase , _lowerCamelCase : Any = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowerCAmelCase )
# Define processor
_lowerCamelCase : int = ViltImageProcessor(size=384 )
_lowerCamelCase : List[str] = BertTokenizer.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Optional[int] = ViltProcessor(_lowerCAmelCase , _lowerCAmelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase : Optional[Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCAmelCase ).raw )
_lowerCamelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCAmelCase ).raw )
_lowerCamelCase : Union[str, Any] = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_lowerCamelCase : Dict = processor(_lowerCAmelCase , _lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Optional[int] = processor(_lowerCAmelCase , _lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Tuple = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCamelCase : Any = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=_lowerCAmelCase ).raw )
if mlm_model:
_lowerCamelCase : str = "a bunch of [MASK] laying on a [MASK]."
else:
_lowerCamelCase : List[Any] = "How many cats are there?"
_lowerCamelCase : Optional[int] = processor(_lowerCAmelCase , _lowerCAmelCase , return_tensors="pt" )
_lowerCamelCase : Optional[Any] = model(**_lowerCAmelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase : Tuple = torch.Size([1, 11, 30522] )
_lowerCamelCase : List[str] = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCAmelCase , atol=1E-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase : Optional[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase : Optional[int] = torch.Size([1, 3129] )
_lowerCamelCase : List[str] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCAmelCase , atol=1E-4 )
# verify vqa prediction equals "2"
_lowerCamelCase : str = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase : Tuple = torch.Size([1, 2] )
_lowerCamelCase : Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : str = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 44 |
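The `read_in_q_k_v` helper above splits ViLT's fused qkv projection into separate query, key and value weights by row slices. A self-contained check of that slicing with a toy hidden size:

import torch

hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused projection matrix
q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : hidden_size * 2, :]
v = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)  # slices tile the original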
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : int = ""
else:
_lowerCamelCase : int = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Any = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
_lowerCamelCase : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[str] = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dct.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ViTConfig()
_lowerCamelCase : List[str] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
_lowerCamelCase : str = int(vit_name[-9:-6] )
else:
_lowerCamelCase : List[Any] = 1000
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Any = "imagenet-1k-id2label.json"
_lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = int(vit_name[-6:-4] )
_lowerCamelCase : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
# load original model from timm
_lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
else:
_lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
else:
_lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
_lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCamelCase : Optional[int] = encoding["pixel_values"]
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
if base_model:
_lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 1 |
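The rename helper used by both conversion scripts is just a pop-and-reinsert on the state dict; for example:

state_dict = {"norm.weight": [1.0], "norm.bias": [0.0]}
state_dict["vit.layernorm.weight"] = state_dict.pop("norm.weight")  # dct.pop(old); dct[new] = val
print(sorted(state_dict))  # ['norm.bias', 'vit.layernorm.weight']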
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ : Optional[Any] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : int = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCamelCase : int = get_sagemaker_input()
else:
_lowerCamelCase : Dict = get_cluster_input()
return config
def A_ ( _lowerCAmelCase : str=None ):
"""simple docstring"""
if subparsers is not None:
_lowerCamelCase : Any = subparsers.add_parser("config" , description=_lowerCAmelCase )
else:
_lowerCamelCase : Optional[int] = argparse.ArgumentParser("Accelerate config command" , description=_lowerCAmelCase )
parser.add_argument(
"--config_file" , default=_lowerCAmelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=_lowerCAmelCase )
return parser
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = get_user_input()
if args.config_file is not None:
_lowerCamelCase : List[str] = args.config_file
else:
if not os.path.isdir(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
_lowerCamelCase : str = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(_lowerCAmelCase )
else:
config.to_yaml_file(_lowerCAmelCase )
print(F'accelerate configuration saved at {config_file}' )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = config_command_parser()
_lowerCamelCase : Tuple = parser.parse_args()
config_command(_lowerCAmelCase )
if __name__ == "__main__":
main()
| 44 |
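The parser above works either standalone or as a subcommand of a parent CLI. A toy reduction of that dual-mode pattern (all names here are invented for illustration):

import argparse

def build_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description="toy config command")
    else:
        parser = argparse.ArgumentParser("toy config command")
    parser.add_argument("--config_file", default=None)
    if subparsers is not None:
        parser.set_defaults(func=lambda args: print(args.config_file))
    return parser

print(build_parser().parse_args(["--config_file", "default_config.yaml"]).config_file)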
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A_ ( _lowerCAmelCase : int = 5000 ):
"""simple docstring"""
_lowerCamelCase : Dict = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCAmelCase )]
for i, pentagonal_i in enumerate(_lowerCAmelCase ):
for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ):
_lowerCamelCase : List[Any] = pentagonal_nums[j]
_lowerCamelCase : Any = pentagonal_i + pentagonal_j
_lowerCamelCase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCAmelCase ) and is_pentagonal(_lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 1 |
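A sanity check of the inverse pentagonal-number formula used above: P(7) = 7(3*7 - 1)/2 = 70, and (1 + sqrt(1 + 24*70))/6 = 7 exactly, so 70 passes the test:

n = 70
root = (1 + 24 * n) ** 0.5  # sqrt(1681) = 41.0
print(root, ((1 + root) / 6) % 1 == 0)  # 41.0 True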
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : List[str] = {
'allenai/led-base-16384': 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
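
if __name__ == "__main__":
    # Added usage sketch (illustrative, not part of the original module); it
    # assumes the "allenai/led-base-16384" checkpoint can be downloaded.
    tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    batch = tok(["short text", "a much longer piece of text"], padding="longest", return_tensors="pt")
    print(batch["input_ids"].shape, batch["attention_mask"].shape)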
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
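
# Added sketch (illustrative, not from the original file): `_LazyModule` boils
# down to a module-level __getattr__ (PEP 562) that imports a submodule only
# when one of its exported names is first accessed. A standalone equivalent:
def _lazy_getattr(name, import_structure, package):
    import importlib

    for module, names in import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{module}", package), name)
    raise AttributeError(f"module {package!r} has no attribute {name!r}")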
| 44 | 1 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
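
# Added sketch (illustrative, not in the original script): perplexity is just
# the exponential of the mean token-level cross-entropy gathered above.
demo_losses = [torch.tensor([2.1, 2.3]), torch.tensor([2.2, 2.0])]
demo_loss = torch.mean(torch.cat(demo_losses))  # tensor(2.1500)
print("demo perplexity:", torch.exp(demo_loss).item())  # ~8.585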
| 44 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
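    # Added checks (illustrative, not part of the original file): a quadratic
    # curve starts and ends at its end control points, and its midpoint is
    # B(0.5) = 0.25*P0 + 0.5*P1 + 0.25*P2.
    curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
    assert curve.bezier_curve_function(0) == (0.0, 0.0)
    assert curve.bezier_curve_function(1) == (5.0, 0.0)
    assert curve.bezier_curve_function(0.5) == (3.75, 2.5)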
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 44 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=6_4 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=1 , ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = parent
__magic_name__ :int = batch_size
__magic_name__ :int = seq_length
__magic_name__ :List[Any] = is_training
__magic_name__ :Any = use_input_mask
__magic_name__ :List[Any] = use_token_type_ids
__magic_name__ :Any = use_labels
__magic_name__ :str = vocab_size
__magic_name__ :Tuple = hidden_size
__magic_name__ :List[str] = num_hidden_layers
__magic_name__ :Dict = num_attention_heads
__magic_name__ :str = intermediate_size
__magic_name__ :Optional[int] = hidden_act
__magic_name__ :List[Any] = hidden_dropout_prob
__magic_name__ :List[str] = attention_probs_dropout_prob
__magic_name__ :List[str] = max_position_embeddings
__magic_name__ :Union[str, Any] = type_vocab_size
__magic_name__ :int = type_sequence_label_size
__magic_name__ :Optional[Any] = initializer_range
__magic_name__ :int = num_labels
__magic_name__ :Tuple = num_choices
__magic_name__ :Optional[int] = scope
__magic_name__ :Tuple = q_groups
__magic_name__ :List[str] = k_groups
__magic_name__ :str = v_groups
__magic_name__ :Optional[Any] = post_attention_groups
__magic_name__ :List[str] = intermediate_groups
__magic_name__ :int = output_groups
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :List[Any] = None
if self.use_input_mask:
__magic_name__ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :int = None
__magic_name__ :Optional[Any] = None
__magic_name__ :Union[str, Any] = None
if self.use_labels:
__magic_name__ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :Dict = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = SqueezeBertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :str = model(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = SqueezeBertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = SqueezeBertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Union[str, Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.num_labels
__magic_name__ :int = SqueezeBertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[Any] = self.num_labels
__magic_name__ :Union[str, Any] = SqueezeBertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = self.num_choices
__magic_name__ :Dict = SqueezeBertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__magic_name__ :Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ :Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ :Optional[int] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
        else ()
)
a__ = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = False
a__ = True
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = SqueezeBertModelTester(self )
__magic_name__ :Optional[int] = ConfigTester(self , config_class=__lowerCAmelCase , dim=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ :int = SqueezeBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
__magic_name__ :Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
__magic_name__ :Tuple = model(__lowerCAmelCase )[0]
__magic_name__ :Any = torch.Size((1, 3) )
self.assertEqual(output.shape , __lowerCAmelCase )
__magic_name__ :List[Any] = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-4 ) )
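
if __name__ == "__main__":
    # Added sketch (illustrative, not part of the test suite; assumes torch is
    # installed): the expected logits above select class 0 after an argmax
    # over the three MNLI labels.
    demo_logits = torch.tensor([[0.6401, -0.0349, -0.6041]])
    assert int(demo_logits.argmax(dim=-1)) == 0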
| 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
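
# Added sketch (illustrative, not from the original file): the DummyObject
# pattern in miniature -- the import always succeeds, but instantiating the
# class raises a clear error naming the missing optional backends.
class _DummyDemoMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires: transformers, torch, note_seq")


class _DummyDemo(metaclass=_DummyDemoMeta):
    pass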
| 44 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """simple docstring"""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
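
if __name__ == "__main__":
    # Added usage sketch (illustrative, not from the original module): the
    # `attribute_map` above aliases `hidden_size` to `d_model` and
    # `num_attention_heads` to `encoder_attention_heads`.
    cfg = WhisperConfig(d_model=64, encoder_layers=2, decoder_layers=2)
    assert cfg.hidden_size == 64
    assert cfg.num_attention_heads == cfg.encoder_attention_heads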
| 1 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = CodeGenTokenizer
lowerCAmelCase_ = CodeGenTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'add_prefix_space': True}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCamelCase : Any = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Tuple ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Union[str, Any],**__A : int ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Dict ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
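
# Added sketch (illustrative, not part of the test suite): the slow test above
# exercises `decode(..., truncate_before_pattern=...)`; its essence is cutting
# the decoded text at the earliest match of any stop pattern.
def _truncate_before(text, patterns):
    import re

    cuts = [m.start() for p in patterns for m in [re.search(p, text, re.MULTILINE)] if m]
    return text[: min(cuts)] if cuts else text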
| 44 | 0 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
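
if __name__ == "__main__":
    # Added sketch (illustrative, not part of the original conftest): SageMaker
    # extracts each metric as the first capture group of its Regex applied to
    # the training log.
    import re

    line = "eval_accuracy = 0.8642"
    match = re.search(r"eval_accuracy.*=\D*(.*?)$", line)
    assert match is not None and float(match.group(1)) == 0.8642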
| 2 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
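
if __name__ == "__main__":
    # Added sketch (illustrative, not part of the original helpers): the
    # synthetic data is y = a*x + b plus small noise, so a least-squares line
    # fit recovers the coefficients.
    ds = RegressionDataset(a=2, b=3, length=256, seed=0)
    a_hat, b_hat = np.polyfit(ds.x, ds.y, deg=1)
    assert abs(a_hat - 2) < 0.1 and abs(b_hat - 3) < 0.1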
| 44 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (the unknown one
    passed as 0), solve Z**2 = R**2 + X**2 for the missing quantity."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
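    # Added checks (illustrative): a 3-4-5 triangle of R, X and Z, since
    # Z**2 = R**2 + X**2.
    assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
    assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}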
| 3 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
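
if __name__ == "__main__":
    # Added sketch (illustrative, not part of the original feature): the PCM
    # branch in `encode_example` rescales 16-bit samples to float32 by
    # dividing by 32767, mapping the int16 range roughly onto [-1, 1].
    pcm_bytes = np.array([0, 16384, -32768], dtype=np.int16).tobytes()
    floats = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
    assert floats[0] == 0.0 and abs(floats[1] - 0.5) < 1e-4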
| 44 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''input_features''', '''is_longer''']
def __init__( self , _snake_case=64 , _snake_case=4_80_00 , _snake_case=4_80 , _snake_case=10 , _snake_case=10_24 , _snake_case=0.0 , _snake_case=False , _snake_case = 0 , _snake_case = 1_40_00 , _snake_case = None , _snake_case = "fusion" , _snake_case = "repeatpad" , **_snake_case , ):
"""simple docstring"""
super().__init__(
feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
lowerCAmelCase = top_db
lowerCAmelCase = truncation
lowerCAmelCase = padding
lowerCAmelCase = fft_window_size
lowerCAmelCase = (fft_window_size >> 1) + 1
lowerCAmelCase = hop_length
lowerCAmelCase = max_length_s
lowerCAmelCase = max_length_s * sampling_rate
lowerCAmelCase = sampling_rate
lowerCAmelCase = frequency_min
lowerCAmelCase = frequency_max
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm=_snake_case , mel_scale='htk' , )
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm='slaney' , mel_scale='slaney' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = spectrogram(
_snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_snake_case , log_mel='dB' , )
return log_mel_spectrogram.T
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase = [0]
# randomly choose index for each part
lowerCAmelCase = np.random.choice(ranges[0] )
lowerCAmelCase = np.random.choice(ranges[1] )
lowerCAmelCase = np.random.choice(ranges[2] )
lowerCAmelCase = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase = torch.tensor(mel[None, None, :] )
lowerCAmelCase = torch.nn.functional.interpolate(
_snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_snake_case )
lowerCAmelCase = mel_shrink[0][0].numpy()
lowerCAmelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate or pad a waveform, then turn it into a (possibly fused) mel spectrogram."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
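
# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hypothetical example of driving the feature extractor above; the
# ten-second silent waveform is a stand-in for real audio.
def _demo_clap_feature_extraction():
    import numpy as np

    feature_extractor = ClapFeatureExtractor()
    # Ten seconds of audio at the default 48 kHz rate -> exactly `nb_max_samples` samples.
    waveform = np.zeros(10 * 48_000, dtype=np.float64)
    features = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")
    # The default "fusion" truncation stacks four mel views, hence the leading 4
    # in the feature shape: (batch, 4, frames, 64).
    print(features["input_features"].shape, features["is_longer"])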
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
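
# --- Usage sketch (illustrative; not part of the original module) ---
# Configs are plain containers: instantiate with the defaults above or override
# any field by keyword.
def _demo_glpn_config():
    config = GLPNConfig(decoder_hidden_size=128)  # override one field, keep the rest
    print(config.depths, config.hidden_sizes, config.decoder_hidden_size)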
'''simple docstring'''
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Pin the stable doc version and register it in the version mapping of custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
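
# For reference, the script expects custom.js to contain lines shaped like the
# following (illustrative excerpt, not the real file):
#
#     const stableVersion = "v4.27.0"
#     const versionMapping = {
#         "v4.26.0": "v4.26.0",
#     }
#
# Running the script with `--version 4.28.0` would pin `stableVersion` to
# "v4.28.0" and append `"v4.28.0": "v4.28.0",` at the end of the mapping.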
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # window and hop sizes are given in milliseconds; convert them to samples
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC (mel-frequency spectral coefficient) features for one waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
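
# --- Usage sketch (illustrative; not part of the original module) ---
# One second of random audio through the extractor above. `return_attention_mask=True`
# is passed explicitly so the normalization step can use per-sample lengths.
def _demo_mfsc_features():
    import numpy as np

    feature_extractor = MCTCTFeatureExtractor()
    waveform = np.random.randn(16_000).astype(np.float32)  # 1 s at the default 16 kHz
    batch = feature_extractor(
        waveform, sampling_rate=16_000, padding=True, return_attention_mask=True, return_tensors="np"
    )
    # 80 mel bins per frame, one frame every `hop_length` (10) milliseconds.
    print(batch["input_features"].shape)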
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps language-modeling sequences (token ids + lengths) for the distillation loop."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
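
# --- Usage sketch (illustrative; `params` is a stand-in namespace, not a real API) ---
# Shows how the dataset pairs with a DataLoader via its own collate function.
def _demo_lm_seqs_dataset():
    from types import SimpleNamespace

    from torch.utils.data import DataLoader

    params = SimpleNamespace(
        max_model_input_size=512,
        mlm=True,
        special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
        is_master=True,
    )
    # Toy corpus: each sequence already starts with [CLS] and ends with [SEP].
    data = [np.array([101] + [2_000 + i] * 20 + [102]) for i in range(4)]
    dataset = LmSeqsDataset(params, data)
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
    token_ids, lengths = next(iter(loader))
    print(token_ids.shape, lengths)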
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into the VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
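
# Example invocation (illustrative script/file names; the checkpoint file name
# must be one of ACCEPTABLE_CHECKPOINTS so the right config and model head are
# selected):
#
#     python convert_visual_bert_checkpoint.py nlvr2_fine_tuned.th ./visualbert-nlvr2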
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube, e.g. perfect_cube(27) -> True, perfect_cube(4) -> False."""
    # Round the floating-point cube root before cubing it back: a raw equality
    # test fails for cubes such as 64, whose float root is 3.9999999999999996.
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
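    # The rounding matters: a naive `n ** (1 / 3)` comparison would wrongly
    # report False here because of floating-point error.
    print(perfect_cube(64))  # True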
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to cover all travel days, given 1-day, 7-day and 30-day pass prices."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
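    # Worked example (illustrative): with travel days [1, 4, 6, 7, 8, 20] and
    # pass prices [2, 7, 15], the optimum is a 1-day ticket on day 1, a 7-day
    # pass covering days 4-8, and a 1-day ticket on day 20, for a total of 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11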
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
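
# --- Illustration (not part of the original script) ---
# `find_executable_batch_size` retries its wrapped function with a halved batch
# size whenever a recognized out-of-memory error escapes it. A minimal,
# self-contained sketch (the simulated error message is what accelerate's OOM
# detection matches on; this assumption may differ across accelerate versions):
def _demo_find_executable_batch_size():
    @find_executable_batch_size(starting_batch_size=128)
    def run(batch_size):
        print(f"trying batch_size={batch_size}")
        if batch_size > 32:  # pretend anything above 32 overflows GPU memory
            raise RuntimeError("CUDA out of memory.")  # simulated OOM
        return batch_size

    return run()  # tries 128, 64, then succeeds at 32 and returns 32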
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
# fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy/paste/tweak the original weights into the MaskFormer structure."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
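
# Example invocation (illustrative local paths):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#         --push_to_hub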
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
    def test_attention_outputs(self):
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case )
@slow
def _a ( self : Dict ):
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = MobileNetVaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def default_image_processor(self ):
        """simple docstring"""
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self ):
        """simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_01) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_semantic_segmentation(self ):
        """simple docstring"""
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
| 9 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
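# A sketch of the idea (using the names reconstructed above): the sequence is
# a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)). When a term is written as
# b * 10**k + c, the total increase and the number of terms skipped until c
# overflows depend only on digitsum(b) and c, so those "jumps" are memoized in
# `memo` and replayed instead of recomputing the terms one by one.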
def next_term(a_i , k , i , n ):
    """simple docstring"""
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c, a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute(a_i , k , i , n ):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add(digits , k , addend ):
    """simple docstring"""
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 10 )
        digits.append(digit )
def solution(n: int = 10**15 ) -> int:
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 44 | 0 |
import math
def solution(n: int = 100 ) -> int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
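# Worked example (n = 10): the sum of squares is 385, the square of the sum is
# 55**2 = 3025, so solution(10) returns 3025 - 385 = 2640.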
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics(split , metrics , output_dir ):
    """simple docstring"""
    logger.info(F'***** {split} metrics *****' )
    for key in sorted(metrics.keys() ):
        logger.info(F'  {key} = {metrics[key]}' )
    save_json(metrics , os.path.join(output_dir , F'{split}_results.json' ) )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("val" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"] , 4 )
            handle_metrics("test" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn(index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
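# Illustrative invocation (hypothetical paths and checkpoint; flag names follow
# the dataclass fields above plus the standard training arguments):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#       --output_dir ./output --do_train --do_eval --predict_with_generate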
| 44 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float , area: float , distance: float ) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
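# Example (illustrative): passing force=0 solves for the Casimir force
# F = (pi**2 * hbar * c * A) / (240 * d**4); for A = 4 m^2 and d = 0.03 m this
# yields roughly {'force': 6.4e-21} (newtons).
# casimir_force(force=0, area=4, distance=0.03)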
| 11 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self ):
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone(self , config , pixel_values , labels ):
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling(self , config , pixel_values , labels ):
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )

    def test_config(self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self ):
        return

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )

    def test_for_masked_image_modeling(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip(reason="FocalNet does not use inputs_embeds" )
    def test_inputs_embeds(self ):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking" )
    def test_feed_forward_chunking(self ):
        pass

    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output(self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )

    def test_hidden_states_output_with_padding(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_initialization(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self ):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self ):
        self.model_tester = FocalNetModelTester(self )
| 44 | 0 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel , BackboneMixin ):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self , config , **kwargs ):
        '''simple docstring'''
        requires_backends(self , "timm" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
        if config.backbone not in timm.list_models():
            raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
        if hasattr(config , "out_features" ) and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
        pretrained = getattr(config , "use_pretrained_backbone" , None )
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , "out_indices" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["vision", "timm"] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config" , TimmBackboneConfig() )
        use_timm = kwargs.pop("use_timm_backbone" , True )
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones" )
        num_channels = kwargs.pop("num_channels" , config.num_channels )
        features_only = kwargs.pop("features_only" , config.features_only )
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("out_indices" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights(self , module ):
        '''simple docstring'''
        pass
    def forward(self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
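# Minimal usage sketch (assumes `timm` and `torch` are installed and that
# "resnet18" is a model name timm recognises):
# import torch
# config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
# backbone = TimmBackbone(config)
# outputs = backbone(torch.randn(1, 3, 224, 224))
# print([fm.shape for fm in outputs.feature_maps])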
| 12 |
'''simple docstring'''
class Things:
    def __init__(self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self ):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self ):
        return self.value

    def get_name(self ):
        return self.name

    def get_weight(self ):
        return self.weight

    def value_weight(self ):
        return self.value / self.weight


def build_menu(name , value , weight ):
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def greedy(items , max_cost , key_func ):
    """simple docstring"""
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    """simple docstring"""


if __name__ == "__main__":
import doctest
doctest.testmod()
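# Example (illustrative): greedily pick the highest-value items within a
# 60-unit weight budget.
# food = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 45, 10])
# foods, total_value = greedy(food, 60, lambda item: item.get_value())
# -> picks Pizza and Coca Cola for a total value of 160.0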
| 44 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors , noofclusters ):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('float64' , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('int32' )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('float' , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder('float' , [dim] )
        v2 = tf.placeholder('float' , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1 , v2 ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('float' , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={v1: vect, v2: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
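# Illustrative run (requires TensorFlow 1.x: tf.Session, tf.sub and
# tf.initialize_all_variables were removed in TF 2.x):
# data = [array([1.0, 1.0]), array([1.2, 0.8]), array([9.0, 9.0]), array([8.8, 9.2])]
# centroids, assignments = TFKMeansCluster(data, 2)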
| 13 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
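# With this lazy-module setup, importing a name such as ConditionalDetrConfig
# from this package only loads the corresponding submodule on first attribute
# access, which keeps top-level imports cheap.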
| 44 | 0 |
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.'
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
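# Worked example: solve(13, 2) == 1, since 13 = 2**2 + 3**2 is the only way to
# write 13 as a sum of squares of unique natural numbers.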
| 14 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file ):
    """simple docstring"""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path ):
    """simple docstring"""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog ):
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="Error tokenizing data" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image ):
    """simple docstring"""
    with open(csv_file_with_image , encoding="utf-8" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("image" ).type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label ):
    """simple docstring"""
    with open(csv_file_with_label , encoding="utf-8" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list ):
    """simple docstring"""
    csv = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x: [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
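# Note (assumption about the underlying reader): `converters` mirrors the
# pandas.read_csv option of the same name, so each converter receives the raw
# string cell and runs before Arrow type inference, which is why the
# "int_list" column above ends up as a list-typed Arrow field.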
| 44 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
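# e.g. floats_list((2, 3)) returns two lists of three floats drawn from [0, 1).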
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''

    def __init__(self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self ):
        """simple docstring"""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
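

# A minimal standalone sketch of the zero-mean / unit-variance normalization
# checked by the last test; this reimplements the idea on a plain NumPy array
# rather than calling WhisperFeatureExtractor.zero_mean_unit_var_norm.
def _zero_mean_unit_var_demo():
    audio = np.random.rand(4000).astype(np.float32) * 65535.0
    normed = (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)
    assert abs(normed.mean()) < 1e-3
    assert abs(normed.var() - 1.0) < 1e-2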
| 15 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda",reason="float16 requires CUDA" )
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_small_fast'] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot_small'] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot_small'] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot_small'] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
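
# Hypothetical usage sketch: with the lazy module in place, importing the
# package is cheap, and each submodule is only loaded on first attribute
# access. For example, the second line below is what triggers the real import
# of configuration_blenderbot_small:
#
#   from transformers.models import blenderbot_small
#   config = blenderbot_small.BlenderbotSmallConfig()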
| 16 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
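

# Minimal sketch of the perplexity computation above, using hypothetical
# per-batch loss tensors: perplexity is exp(mean negative log-likelihood).
def _perplexity_demo():
    gathered = [torch.tensor([2.1, 2.3]), torch.tensor([2.0, 2.2])]
    mean_loss = torch.mean(torch.cat(gathered))
    return torch.exp(mean_loss).item()  # exp(2.15) ~= 8.58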
| 44 | 0 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
    def test_fast_slow_equivalence(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 17 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
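

# Minimal sketch of the global_attention_mask padding rule implemented above:
# pad with -1 ("local attention") until the mask matches input_ids in length.
# The values here are hypothetical.
def _global_attention_pad_demo():
    encoded = {"input_ids": [0, 10, 11, 2, 1, 1], "global_attention_mask": [1, 0, 0, 0]}
    difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
    encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
    assert len(encoded["global_attention_mask"]) == len(encoded["input_ids"])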
| 44 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
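

# Hypothetical usage of rename_key on a single checkpoint key, following the
# mapping rules above (the key name itself is an illustrative assumption).
def _rename_key_demo():
    new_name = rename_key("img_encoder.patch_embed.proj.weight")
    assert new_name == "vision_model.embeddings.patch_embeddings.projection.weight"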
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 18 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
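

# Illustration of the qkv split above with hypothetical shapes: a fused
# (3*hidden, hidden) projection matrix is cut into equal query/key/value thirds.
def _qkv_split_demo():
    import numpy as np

    hidden = 4
    qkv = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : hidden * 2, :], qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)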
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
_lowerCamelCase : List[Any] = 192
_lowerCamelCase : Optional[int] = 768
_lowerCamelCase : Union[str, Any] = 12
_lowerCamelCase : Optional[Any] = 3
elif vit_name[9:].startswith("small" ):
_lowerCamelCase : Optional[Any] = 384
_lowerCamelCase : Optional[Any] = 1536
_lowerCamelCase : int = 12
_lowerCamelCase : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[Any] = 2304
_lowerCamelCase : List[Any] = 8
_lowerCamelCase : List[Any] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Optional[Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : Union[str, Any] = 16
elif vit_name[4:].startswith("huge" ):
_lowerCamelCase : str = 1280
_lowerCamelCase : List[Any] = 5120
_lowerCamelCase : List[str] = 32
_lowerCamelCase : List[str] = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44 | 0 |
"""simple docstring"""
def solution() -> int:
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
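
    # Cross-check with the standard library: count Sundays on the first of the
    # month, 1901-2000 (datetime.date.weekday() == 6 means Sunday). Expected: 171.
    import datetime

    assert solution() == sum(
        1 for y in range(1901, 2001) for m in range(1, 13) if datetime.date(y, m, 1).weekday() == 6
    )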
| 19 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """Returns True if n is pentagonal: n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a whole number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns the smallest difference b = P_j - P_i such that P_j + P_i and
    P_j - P_i are both pentagonal (Project Euler problem 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
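
    # Quick sanity check of is_pentagonal on known values: P(4) = 22 is
    # pentagonal, while 23 is not.
    assert is_pentagonal(22) and not is_pentagonal(23)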
| 44 | 0 |